2026-03-07T10:26:40.759 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-07T10:26:40.765 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-07T10:26:40.786 INFO:teuthology.run:Config:
archive_path: /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18
branch: cobaltcore-storage-v19.2.3-fasttrack-5
description: orch:cephadm:workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_extra_daemon_features}
email: null
first_in_suite: false
flavor: default
job_id: '18'
last_in_suite: false
machine_type: vps
name: irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: cobaltcore-storage-v19.2.3-fasttrack-5
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 3
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: true
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
  install:
    ceph:
      flavor: default
      sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
    extra_system_packages:
      deb:
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    branch: tt-fasttrack-5-workunits
    sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b
owner: irq0
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - mon.a
  - mgr.a
  - osd.0
- - host.b
  - mon.b
  - mgr.b
  - osd.1
seed: 8363
sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
sleep_before_teardown: 0
subset: 1/64
suite: orch:cephadm:workunits
suite_branch: tt-fasttrack-5-workunits
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b
targets:
  vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAo7hpJ5YBOWeNiEM2JKetbr9Kii80C0lYTq39lx5pXyMIZcZm1V38TJICC+keGiAiRJpnCHEZGN2KAFxCVVn68=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC3PSTCMaMQJWdQl2g4FgEDMoOPhAWKorcqYcJWZbX4hWKquvnEFdfsxOMN+VqjPXCy4qAyiBSSlMTnjm56QtKg=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install runc nvmetcli nvme-cli -y
    - sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
    - sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
- cephadm: null
- exec:
    all-hosts:
    - mkdir /etc/cephadm_testing
- cephadm.apply:
    specs:
    - extra_container_args:
      - --cpus=2
      extra_entrypoint_args:
      - --debug_ms 10
      placement:
        host_pattern: '*'
      service_type: mon
    - custom_configs:
      - content: "while getopts \"o:c:\" opt; do\n  case ${opt} in\n    o )\n      OUT_FILE=${OPTARG}\n      ;;\n    c )\n      CONTENT=${OPTARG}\n  esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n"
        mount_path: /root/write_thing_to_file.sh
      extra_container_args:
      - -v
      - /etc/cephadm_testing:/root/cephadm_testing
      extra_entrypoint_args:
      - /root/write_thing_to_file.sh
      - -c
      - testing_custom_containers
      - -o
      - /root/cephadm_testing/testing.txt
      placement:
        host_pattern: '*'
      service_id: foo
      service_type: container
      spec:
        entrypoint: bash
        image: quay.io/fedora/fedora:latest
    - custom_configs:
      - content: 'set -e
          test -f /var/cache/bar/from.txt
          test -f /var/cache/bar/presized.dat
          echo ok > /var/cache/bar/primary.txt
          sleep infinity
          '
        mount_path: /root/init_check.sh
      extra_entrypoint_args:
      - /root/init_check.sh
      placement:
        host_pattern: '*'
      service_id: bar
      service_type: container
      spec:
        dirs:
        - data
        entrypoint: bash
        image: quay.io/fedora/fedora:latest
        init_containers:
        - entrypoint: bash
          entrypoint_args:
          - argument: -c
          - argument: . /etc/os-release && echo from=$ID > /var/cache/bar/from.txt
          image: quay.io/centos/centos:latest
          volume_mounts:
            data: /var/cache/bar:z
        - entrypoint: bash
          entrypoint_args:
          - argument: -c
          - argument: test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat
          volume_mounts:
            data: /var/cache/bar:z
        volume_mounts:
          data: /var/cache/bar:z
- cephadm.wait_for_service:
    service: mon
- cephadm.wait_for_service:
    service: container.foo
- cephadm.wait_for_service:
    service: container.bar
- exec:
    host.a:
    - 'set -ex
      FSID=$(/home/ubuntu/cephtest/cephadm shell -- ceph fsid)
      sleep 60
      # check extra container and entrypoint args written to mon unit run file
      grep "\-\-cpus=2" /var/lib/ceph/$FSID/mon.*/unit.run
      grep "\-\-debug_ms 10" /var/lib/ceph/$FSID/mon.*/unit.run
      # check that custom container properly wrote content to file.
      # This requires the custom config, extra container args, and
      # entrypoint args to all be working in order for this to have
      # been written. The container entrypoint was set up with custom_configs,
      # the content and where to write to with the entrypoint args, and the mounting
      # of the /etc/cephadm_testing dir with extra container args
      grep "testing_custom_containers" /etc/cephadm_testing/testing.txt
      # Verify that container bar''s init containers and primary container
      # ran successfully
      dir=$(find /var/lib/ceph/$FSID -maxdepth 1 -type d -name ''container.bar.*'')
      test -n "$dir"
      grep ok ${dir}/data/primary.txt
      grep from=centos ${dir}/data/from.txt
      test -s ${dir}/data/presized.dat
      '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-07_10:02:54
tube: vps
user: irq0
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.2764
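The cephadm.apply task above hands each of these specs to the orchestrator. As a hedged sketch, the container.bar entry could also be applied by hand as a standalone spec file; the file name bar.yaml and the cephadm shell -m usage are illustrative, while the spec fields are copied from the job config:

    # Recreate the 'container.bar' service spec and feed it to the orchestrator.
    # Init containers run in order before the primary container starts; all of
    # them share the 'data' volume mounted at /var/cache/bar.
    cat > bar.yaml <<'EOF'
    service_type: container
    service_id: bar
    placement:
      host_pattern: '*'
    spec:
      image: quay.io/fedora/fedora:latest
      entrypoint: bash
      dirs:
      - data
      volume_mounts:
        data: /var/cache/bar:z
      init_containers:
      - image: quay.io/centos/centos:latest
        entrypoint: bash
        entrypoint_args:
        - argument: -c
        - argument: . /etc/os-release && echo from=$ID > /var/cache/bar/from.txt
        volume_mounts:
          data: /var/cache/bar:z
    EOF
    # 'cephadm shell -m <file>' mounts the file under /mnt inside the shell container
    sudo /home/ubuntu/cephtest/cephadm shell -m bar.yaml -- ceph orch apply -i /mnt/bar.yaml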
2026-03-07T10:26:40.786 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa; will attempt to use it
2026-03-07T10:26:40.787 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa/tasks
2026-03-07T10:26:40.787 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-07T10:26:40.787 INFO:teuthology.task.internal:Saving configuration
2026-03-07T10:26:40.793 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-07T10:26:40.794 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-07T10:26:40.800 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm08.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:25:03.087474', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:08', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAo7hpJ5YBOWeNiEM2JKetbr9Kii80C0lYTq39lx5pXyMIZcZm1V38TJICC+keGiAiRJpnCHEZGN2KAFxCVVn68='}
2026-03-07T10:26:40.805 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:25:03.087919', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC3PSTCMaMQJWdQl2g4FgEDMoOPhAWKorcqYcJWZbX4hWKquvnEFdfsxOMN+VqjPXCy4qAyiBSSlMTnjm56QtKg='}
2026-03-07T10:26:40.805 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-07T10:26:40.805 INFO:teuthology.task.internal:roles: ubuntu@vm08.local - ['host.a', 'mon.a', 'mgr.a', 'osd.0']
2026-03-07T10:26:40.805 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'mon.b', 'mgr.b', 'osd.1']
2026-03-07T10:26:40.805 INFO:teuthology.run_tasks:Running task console_log...
2026-03-07T10:26:40.811 DEBUG:teuthology.task.console_log:vm08 does not support IPMI; excluding
2026-03-07T10:26:40.816 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding
2026-03-07T10:26:40.816 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f7dc0abbd90>, signals=[15])
2026-03-07T10:26:40.816 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-07T10:26:40.816 INFO:teuthology.task.internal:Opening connections...
2026-03-07T10:26:40.816 DEBUG:teuthology.task.internal:connecting to ubuntu@vm08.local
2026-03-07T10:26:40.817 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:26:40.877 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local
2026-03-07T10:26:40.878 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:26:40.935 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-07T10:26:40.936 DEBUG:teuthology.orchestra.run.vm08:> uname -m
2026-03-07T10:26:40.984 INFO:teuthology.orchestra.run.vm08.stdout:x86_64
2026-03-07T10:26:40.985 DEBUG:teuthology.orchestra.run.vm08:> cat /etc/os-release
2026-03-07T10:26:41.038 INFO:teuthology.orchestra.run.vm08.stdout:NAME="CentOS Stream"
2026-03-07T10:26:41.038 INFO:teuthology.orchestra.run.vm08.stdout:VERSION="9"
2026-03-07T10:26:41.038 INFO:teuthology.orchestra.run.vm08.stdout:ID="centos"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:ID_LIKE="rhel fedora"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:VERSION_ID="9"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:PLATFORM_ID="platform:el9"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:ANSI_COLOR="0;31"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:LOGO="fedora-logo-icon"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:HOME_URL="https://centos.org/"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-07T10:26:41.039 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-07T10:26:41.039 INFO:teuthology.lock.ops:Updating vm08.local on lock server
2026-03-07T10:26:41.043 DEBUG:teuthology.orchestra.run.vm09:> uname -m
2026-03-07T10:26:41.059 INFO:teuthology.orchestra.run.vm09.stdout:x86_64
2026-03-07T10:26:41.059 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:NAME="CentOS Stream"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="9"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:ID="centos"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE="rhel fedora"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="9"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:PLATFORM_ID="platform:el9"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:ANSI_COLOR="0;31"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:LOGO="fedora-logo-icon"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://centos.org/"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-07T10:26:41.114 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-07T10:26:41.114 INFO:teuthology.lock.ops:Updating vm09.local on lock server
2026-03-07T10:26:41.119 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-07T10:26:41.120 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-07T10:26:41.121 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-07T10:26:41.121 DEBUG:teuthology.orchestra.run.vm08:> test '!' -e /home/ubuntu/cephtest
2026-03-07T10:26:41.123 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest
2026-03-07T10:26:41.168 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-07T10:26:41.169 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-07T10:26:41.169 DEBUG:teuthology.orchestra.run.vm08:> test -z $(ls -A /var/lib/ceph)
2026-03-07T10:26:41.177 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph)
2026-03-07T10:26:41.190 INFO:teuthology.orchestra.run.vm08.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-07T10:26:41.223 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-07T10:26:41.224 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-07T10:26:41.231 DEBUG:teuthology.orchestra.run.vm08:> test -e /ceph-qa-ready
2026-03-07T10:26:41.245 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:26:41.445 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready
2026-03-07T10:26:41.458 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:26:41.644 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-07T10:26:41.645 INFO:teuthology.task.internal:Creating test directory...
2026-03-07T10:26:41.645 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-07T10:26:41.647 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-07T10:26:41.663 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-07T10:26:41.664 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-07T10:26:41.665 INFO:teuthology.task.internal:Creating archive directory...
2026-03-07T10:26:41.665 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-07T10:26:41.704 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-07T10:26:41.721 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-07T10:26:41.722 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-07T10:26:41.722 DEBUG:teuthology.orchestra.run.vm08:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-07T10:26:41.773 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:26:41.774 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-07T10:26:41.787 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:26:41.787 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-07T10:26:41.816 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-07T10:26:41.837 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:26:41.845 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:26:41.851 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:26:41.859 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
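The coredump task points kernel.core_pattern at the archive directory, where %t expands to the epoch timestamp and %p to the PID of the crashing process. A minimal sketch for confirming the setting took effect (standard Linux tooling; the throwaway sleep process is illustrative only):

    sysctl kernel.core_pattern                  # should print the pattern set above
    ulimit -c unlimited                         # allow core files in this shell
    sleep 100 & kill -SEGV $!                   # segfault a throwaway process
    ls /home/ubuntu/cephtest/archive/coredump/  # expect a <epoch>.<pid>.core file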
2026-03-07T10:26:41.861 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-07T10:26:41.862 INFO:teuthology.task.internal:Configuring sudo...
2026-03-07T10:26:41.862 DEBUG:teuthology.orchestra.run.vm08:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-07T10:26:41.888 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-07T10:26:41.923 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-07T10:26:41.925 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-07T10:26:41.925 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-07T10:26:41.953 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-07T10:26:41.977 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:26:42.028 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:26:42.084 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:26:42.084 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-07T10:26:42.142 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:26:42.162 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:26:42.221 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-07T10:26:42.221 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-07T10:26:42.281 DEBUG:teuthology.orchestra.run.vm08:> sudo service rsyslog restart
2026-03-07T10:26:42.283 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart
2026-03-07T10:26:42.311 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:26:42.350 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:26:42.617 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-07T10:26:42.619 INFO:teuthology.task.internal:Starting timer...
2026-03-07T10:26:42.619 INFO:teuthology.run_tasks:Running task pcp...
2026-03-07T10:26:42.621 INFO:teuthology.run_tasks:Running task selinux...
2026-03-07T10:26:42.624 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0']}
2026-03-07T10:26:42.624 INFO:teuthology.task.selinux:Excluding vm08: VMs are not yet supported
2026-03-07T10:26:42.624 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported
2026-03-07T10:26:42.624 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-07T10:26:42.624 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-07T10:26:42.624 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
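On bare-metal nodes the selinux task would now record the current mode and switch to permissive; on these VMs it is a no-op. A hedged sketch of the equivalent manual steps (standard RHEL/CentOS tooling; the allowlist above is matched against AVC denials collected during the run):

    getenforce                       # current mode: Enforcing/Permissive/Disabled
    sudo setenforce 0                # permissive for the duration of the test
    sudo ausearch -m AVC -ts recent  # review denials; allowlisted scontexts are ignored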
2026-03-07T10:26:42.624 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-07T10:26:42.625 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-07T10:26:42.626 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-03-07T10:26:42.627 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-03-07T10:26:43.188 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-03-07T10:26:43.194 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-07T10:26:43.194 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryg33rasq6 --limit vm08.local,vm09.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-07T10:28:25.561 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm08.local'), Remote(name='ubuntu@vm09.local')]
2026-03-07T10:28:25.561 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm08.local'
2026-03-07T10:28:25.562 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:28:25.627 DEBUG:teuthology.orchestra.run.vm08:> true
2026-03-07T10:28:25.707 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm08.local'
2026-03-07T10:28:25.707 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local'
2026-03-07T10:28:25.707 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:28:25.768 DEBUG:teuthology.orchestra.run.vm09:> true
2026-03-07T10:28:25.844 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local'
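The ansible-playbook invocation above uses a generated temporary inventory (/tmp/teuth_ansible_inventoryg33rasq6). To re-run the same provisioning by hand, a minimal inventory exposing the testnodes group that cephlab.yml targets might look like this (assumed layout; teuthology builds its own inventory file):

    cat > /tmp/inventory <<'EOF'
    [testnodes]
    vm08.local
    vm09.local
    EOF
    ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' \
      -i /tmp/inventory --limit vm08.local,vm09.local \
      /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml \
      --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs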
2026-03-07T10:28:25.844 INFO:teuthology.run_tasks:Running task clock...
2026-03-07T10:28:25.846 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-07T10:28:25.846 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-07T10:28:25.846 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:28:25.848 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-07T10:28:25.848 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:28:25.883 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-07T10:28:25.898 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-07T10:28:25.924 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-07T10:28:25.930 INFO:teuthology.orchestra.run.vm08.stderr:sudo: ntpd: command not found
2026-03-07T10:28:25.943 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-07T10:28:25.943 INFO:teuthology.orchestra.run.vm08.stdout:506 Cannot talk to daemon
2026-03-07T10:28:25.961 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-07T10:28:25.974 INFO:teuthology.orchestra.run.vm09.stderr:sudo: ntpd: command not found
2026-03-07T10:28:25.981 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-07T10:28:25.984 INFO:teuthology.orchestra.run.vm09.stdout:506 Cannot talk to daemon
2026-03-07T10:28:26.000 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-07T10:28:26.015 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-07T10:28:26.031 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:28:26.033 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address         Stratum Poll Reach LastRx Last sample
2026-03-07T10:28:26.033 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-07T10:28:26.067 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:28:26.068 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address         Stratum Poll Reach LastRx Last sample
2026-03-07T10:28:26.068 INFO:teuthology.orchestra.run.vm09.stdout:===============================================================================
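The clock task runs a distribution-agnostic one-liner, so on these chrony-only CentOS 9 nodes the ntp/ntpd branches fail and the chronyc fallbacks do the work; the "506 Cannot talk to daemon" above is makestep running while chronyd is stopped. A hedged chrony-only sequence that avoids that (standard chrony commands):

    sudo systemctl restart chronyd.service
    sudo chronyc makestep   # step the clock immediately instead of slewing
    chronyc sources         # the 'MS Name/IP address' table shown above
    chronyc tracking        # current offset, frequency skew and stratum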
2026-03-07T10:28:26.069 INFO:teuthology.run_tasks:Running task pexec...
2026-03-07T10:28:26.074 INFO:teuthology.task.pexec:Executing custom commands...
2026-03-07T10:28:26.074 DEBUG:teuthology.orchestra.run.vm08:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-07T10:28:26.074 DEBUG:teuthology.orchestra.run.vm09:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-07T10:28:26.076 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo dnf remove nvme-cli -y
2026-03-07T10:28:26.076 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo dnf install runc nvmetcli nvme-cli -y
2026-03-07T10:28:26.076 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.076 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.076 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm08.local
2026-03-07T10:28:26.076 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-07T10:28:26.076 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y
2026-03-07T10:28:26.076 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.076 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.110 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf remove nvme-cli -y
2026-03-07T10:28:26.111 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo dnf install runc nvmetcli nvme-cli -y
2026-03-07T10:28:26.111 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.111 DEBUG:teuthology.task.pexec:ubuntu@vm09.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.111 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm09.local
2026-03-07T10:28:26.111 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-07T10:28:26.111 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y
2026-03-07T10:28:26.111 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.111 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-07T10:28:26.308 INFO:teuthology.orchestra.run.vm08.stdout:No match for argument: nvme-cli
2026-03-07T10:28:26.308 INFO:teuthology.orchestra.run.vm08.stderr:No packages marked for removal.
2026-03-07T10:28:26.311 INFO:teuthology.orchestra.run.vm08.stdout:Dependencies resolved.
2026-03-07T10:28:26.312 INFO:teuthology.orchestra.run.vm08.stdout:Nothing to do.
2026-03-07T10:28:26.312 INFO:teuthology.orchestra.run.vm08.stdout:Complete!
2026-03-07T10:28:26.330 INFO:teuthology.orchestra.run.vm09.stdout:No match for argument: nvme-cli
2026-03-07T10:28:26.330 INFO:teuthology.orchestra.run.vm09.stderr:No packages marked for removal.
2026-03-07T10:28:26.335 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved.
2026-03-07T10:28:26.336 INFO:teuthology.orchestra.run.vm09.stdout:Nothing to do.
2026-03-07T10:28:26.336 INFO:teuthology.orchestra.run.vm09.stdout:Complete!
2026-03-07T10:28:26.767 INFO:teuthology.orchestra.run.vm09.stdout:Last metadata expiration check: 0:01:12 ago on Sat 07 Mar 2026 10:27:14 AM UTC.
2026-03-07T10:28:26.774 INFO:teuthology.orchestra.run.vm08.stdout:Last metadata expiration check: 0:00:57 ago on Sat 07 Mar 2026 10:27:29 AM UTC.
2026-03-07T10:28:26.880 INFO:teuthology.orchestra.run.vm09.stdout:Dependencies resolved.
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: Package                       Arch        Version            Repository    Size
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Installing:
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli                      x86_64      2.16-1.el9         baseos       1.2 M
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: nvmetcli                      noarch      0.8-3.el9          baseos        44 k
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: runc                          x86_64      4:1.4.0-2.el9      appstream    4.0 M
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Installing dependencies:
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell           noarch      1:1.1.30-1.el9     baseos        72 k
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: python3-kmod                  x86_64      0.9-32.el9         baseos        84 k
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing             noarch      2.4.7-9.el9        baseos       150 k
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout: python3-urwid                 x86_64      2.1.2-4.el9        baseos       837 k
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Transaction Summary
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:================================================================================
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Install 7 Packages
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Total download size: 6.3 M
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Installed size: 24 M
2026-03-07T10:28:26.881 INFO:teuthology.orchestra.run.vm09.stdout:Downloading Packages:
2026-03-07T10:28:26.891 INFO:teuthology.orchestra.run.vm08.stdout:Dependencies resolved.
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: Package                       Arch        Version            Repository    Size
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Installing:
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: nvme-cli                      x86_64      2.16-1.el9         baseos       1.2 M
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: nvmetcli                      noarch      0.8-3.el9          baseos        44 k
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: runc                          x86_64      4:1.4.0-2.el9      appstream    4.0 M
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Installing dependencies:
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: python3-configshell           noarch      1:1.1.30-1.el9     baseos        72 k
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: python3-kmod                  x86_64      0.9-32.el9         baseos        84 k
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: python3-pyparsing             noarch      2.4.7-9.el9        baseos       150 k
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout: python3-urwid                 x86_64      2.1.2-4.el9        baseos       837 k
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Transaction Summary
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Install 7 Packages
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Total download size: 6.3 M
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Installed size: 24 M
2026-03-07T10:28:26.892 INFO:teuthology.orchestra.run.vm08.stdout:Downloading Packages:
2026-03-07T10:28:27.695 INFO:teuthology.orchestra.run.vm08.stdout:(1/7): nvmetcli-0.8-3.el9.noarch.rpm 328 kB/s | 44 kB 00:00
2026-03-07T10:28:27.705 INFO:teuthology.orchestra.run.vm08.stdout:(2/7): python3-configshell-1.1.30-1.el9.noarch. 497 kB/s | 72 kB 00:00
2026-03-07T10:28:27.746 INFO:teuthology.orchestra.run.vm08.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 1.6 MB/s | 84 kB 00:00
2026-03-07T10:28:27.765 INFO:teuthology.orchestra.run.vm08.stdout:(4/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.5 MB/s | 150 kB 00:00
2026-03-07T10:28:27.795 INFO:teuthology.orchestra.run.vm08.stdout:(5/7): nvme-cli-2.16-1.el9.x86_64.rpm 4.9 MB/s | 1.2 MB 00:00
2026-03-07T10:28:27.816 INFO:teuthology.orchestra.run.vm09.stdout:(1/7): nvmetcli-0.8-3.el9.noarch.rpm 154 kB/s | 44 kB 00:00
2026-03-07T10:28:27.838 INFO:teuthology.orchestra.run.vm08.stdout:(6/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 8.9 MB/s | 837 kB 00:00
2026-03-07T10:28:28.127 INFO:teuthology.orchestra.run.vm09.stdout:(2/7): python3-configshell-1.1.30-1.el9.noarch. 121 kB/s | 72 kB 00:00
2026-03-07T10:28:28.221 INFO:teuthology.orchestra.run.vm08.stdout:(7/7): runc-1.4.0-2.el9.x86_64.rpm 8.7 MB/s | 4.0 MB 00:00
2026-03-07T10:28:28.221 INFO:teuthology.orchestra.run.vm08.stdout:--------------------------------------------------------------------------------
2026-03-07T10:28:28.221 INFO:teuthology.orchestra.run.vm08.stdout:Total 4.7 MB/s | 6.3 MB 00:01
2026-03-07T10:28:28.234 INFO:teuthology.orchestra.run.vm09.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 202 kB/s | 84 kB 00:00
2026-03-07T10:28:28.312 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction check
2026-03-07T10:28:28.317 INFO:teuthology.orchestra.run.vm09.stdout:(4/7): nvme-cli-2.16-1.el9.x86_64.rpm 1.5 MB/s | 1.2 MB 00:00
2026-03-07T10:28:28.323 INFO:teuthology.orchestra.run.vm08.stdout:Transaction check succeeded.
2026-03-07T10:28:28.324 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction test
2026-03-07T10:28:28.398 INFO:teuthology.orchestra.run.vm08.stdout:Transaction test succeeded.
2026-03-07T10:28:28.399 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction
2026-03-07T10:28:28.600 INFO:teuthology.orchestra.run.vm08.stdout: Preparing : 1/1
2026-03-07T10:28:28.613 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7
2026-03-07T10:28:28.623 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7
2026-03-07T10:28:28.626 INFO:teuthology.orchestra.run.vm09.stdout:(5/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 302 kB/s | 150 kB 00:00
2026-03-07T10:28:28.633 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-07T10:28:28.640 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-07T10:28:28.645 INFO:teuthology.orchestra.run.vm08.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7
2026-03-07T10:28:28.702 INFO:teuthology.orchestra.run.vm08.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7
2026-03-07T10:28:28.816 INFO:teuthology.orchestra.run.vm09.stdout:(6/7): runc-1.4.0-2.el9.x86_64.rpm 7.9 MB/s | 4.0 MB 00:00
2026-03-07T10:28:28.866 INFO:teuthology.orchestra.run.vm08.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7
2026-03-07T10:28:28.872 INFO:teuthology.orchestra.run.vm08.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-07T10:28:29.034 INFO:teuthology.orchestra.run.vm09.stdout:(7/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 1.0 MB/s | 837 kB 00:00
2026-03-07T10:28:29.034 INFO:teuthology.orchestra.run.vm09.stdout:--------------------------------------------------------------------------------
2026-03-07T10:28:29.034 INFO:teuthology.orchestra.run.vm09.stdout:Total 2.9 MB/s | 6.3 MB 00:02
2026-03-07T10:28:29.114 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction check
2026-03-07T10:28:29.122 INFO:teuthology.orchestra.run.vm09.stdout:Transaction check succeeded.
2026-03-07T10:28:29.122 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction test
2026-03-07T10:28:29.192 INFO:teuthology.orchestra.run.vm09.stdout:Transaction test succeeded.
2026-03-07T10:28:29.194 INFO:teuthology.orchestra.run.vm09.stdout:Running transaction
2026-03-07T10:28:29.264 INFO:teuthology.orchestra.run.vm08.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-07T10:28:29.264 INFO:teuthology.orchestra.run.vm08.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-07T10:28:29.264 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:28:29.382 INFO:teuthology.orchestra.run.vm09.stdout: Preparing : 1/1
2026-03-07T10:28:29.395 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7
2026-03-07T10:28:29.409 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7
2026-03-07T10:28:29.420 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-07T10:28:29.428 INFO:teuthology.orchestra.run.vm09.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-07T10:28:29.433 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7
2026-03-07T10:28:29.488 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7
2026-03-07T10:28:29.614 INFO:teuthology.orchestra.run.vm09.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7
2026-03-07T10:28:29.627 INFO:teuthology.orchestra.run.vm09.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-07T10:28:29.835 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7
2026-03-07T10:28:29.836 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7
2026-03-07T10:28:29.836 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-07T10:28:29.836 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-07T10:28:29.836 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7
2026-03-07T10:28:29.836 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout:Installed:
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout: runc-4:1.4.0-2.el9.x86_64
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:28:29.932 INFO:teuthology.orchestra.run.vm08.stdout:Complete!
2026-03-07T10:28:30.022 INFO:teuthology.orchestra.run.vm09.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-07T10:28:30.022 INFO:teuthology.orchestra.run.vm09.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-07T10:28:30.022 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:28:30.034 DEBUG:teuthology.parallel:result is None
2026-03-07T10:28:30.602 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7
2026-03-07T10:28:30.602 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7
2026-03-07T10:28:30.602 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-07T10:28:30.602 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-07T10:28:30.602 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7
2026-03-07T10:28:30.602 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7
2026-03-07T10:28:30.692 INFO:teuthology.orchestra.run.vm09.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7
2026-03-07T10:28:30.692 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout:Installed:
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout: runc-4:1.4.0-2.el9.x86_64
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:28:30.693 INFO:teuthology.orchestra.run.vm09.stdout:Complete!
2026-03-07T10:28:30.801 DEBUG:teuthology.parallel:result is None
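The pexec step switched the default container runtime from crun to runc by editing /usr/share/containers/containers.conf. A quick hedged check that podman picked up the change (the Go-template field path is an assumption about podman's current info layout):

    grep -n 'runtime = ' /usr/share/containers/containers.conf
    podman info --format '{{.Host.OCIRuntime.Name}}'   # expect: runc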
2026-03-07T10:28:30.802 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Config: {'conf': {'global': {'mon election default strategy': 3}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': True}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '340d3c24fc6ae7529322dc7ccee6c6cb2589da0a', 'cephadm_binary_url': 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5'}}
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Cluster fsid is 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.108', 'mon.b': '192.168.123.109'}
2026-03-07T10:28:30.847 INFO:tasks.cephadm:First mon is mon.a on vm08
2026-03-07T10:28:30.847 INFO:tasks.cephadm:First mgr is a
2026-03-07T10:28:30.847 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-07T10:28:30.847 DEBUG:teuthology.orchestra.run.vm08:> sudo hostname $(hostname -s)
2026-03-07T10:28:30.876 DEBUG:teuthology.orchestra.run.vm09:> sudo hostname $(hostname -s)
2026-03-07T10:28:30.912 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
2026-03-07T10:28:30.912 DEBUG:teuthology.orchestra.run.vm08:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-07T10:28:32.009 INFO:teuthology.orchestra.run.vm08.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar  7 10:28 /home/ubuntu/cephtest/cephadm
2026-03-07T10:28:32.009 DEBUG:teuthology.orchestra.run.vm09:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-07T10:28:33.082 INFO:teuthology.orchestra.run.vm09.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar  7 10:28 /home/ubuntu/cephtest/cephadm
2026-03-07T10:28:33.082 DEBUG:teuthology.orchestra.run.vm08:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-07T10:28:33.103 DEBUG:teuthology.orchestra.run.vm09:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-07T10:28:33.124 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 on all hosts...
2026-03-07T10:28:33.125 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-07T10:28:33.147 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-07T10:28:33.352 INFO:teuthology.orchestra.run.vm08.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:28:33.376 INFO:teuthology.orchestra.run.vm09.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:29:00.684 INFO:teuthology.orchestra.run.vm08.stdout:{
2026-03-07T10:29:00.684 INFO:teuthology.orchestra.run.vm08.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-07T10:29:00.684 INFO:teuthology.orchestra.run.vm08.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-07T10:29:00.684 INFO:teuthology.orchestra.run.vm08.stdout: "repo_digests": [
2026-03-07T10:29:00.684 INFO:teuthology.orchestra.run.vm08.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-07T10:29:00.685 INFO:teuthology.orchestra.run.vm08.stdout: ]
2026-03-07T10:29:00.685 INFO:teuthology.orchestra.run.vm08.stdout:}
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout:{
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout: "repo_digests": [
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout: ]
2026-03-07T10:29:01.116 INFO:teuthology.orchestra.run.vm09.stdout:}
2026-03-07T10:29:01.133 DEBUG:teuthology.orchestra.run.vm08:> sudo mkdir -p /etc/ceph
2026-03-07T10:29:01.163 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /etc/ceph
2026-03-07T10:29:01.195 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 777 /etc/ceph
2026-03-07T10:29:01.229 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 777 /etc/ceph
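Both hosts report the same image_id and repo digest, so the orchestrator will run identical bits everywhere. A hedged way to cross-check the local image against the pull output above (standard podman commands; the cephadm version call just sanity-checks the downloaded binary):

    sudo podman image inspect --format '{{.Id}}' \
      harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
    sudo podman images --digests harbor.clyso.com/custom-ceph/ceph/ceph
    /home/ubuntu/cephtest/cephadm version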
2026-03-07T10:29:01.264 INFO:tasks.cephadm:Writing seed config...
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [global] mon election default strategy = 3
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = True
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-07T10:29:01.265 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-07T10:29:01.265 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:29:01.265 DEBUG:teuthology.orchestra.run.vm08:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-07T10:29:01.290 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 630831e6-1a10-11f1-b289-9dc3f8f14d3d
mon election default strategy = 3

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = True

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-07T10:29:01.291 DEBUG:teuthology.orchestra.run.vm08:mon.a> sudo journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service
2026-03-07T10:29:01.335 DEBUG:teuthology.orchestra.run.vm08:mgr.a> sudo journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a.service
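Once daemons are up, the overrides in this seed config can be verified against the running processes. A hedged sketch using the standard config CLI (option names are the underscore forms of the entries above):

    sudo /home/ubuntu/cephtest/cephadm shell -- ceph config show mon.a debug_mon    # expect 20/20
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph config show osd.0 osd_op_queue # expect debug_random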
2026-03-07T10:29:01.591 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stdout 5.8.0
2026-03-07T10:29:01.591 INFO:teuthology.orchestra.run.vm08.stdout:podman (/bin/podman) version 5.8.0 is present
2026-03-07T10:29:01.591 INFO:teuthology.orchestra.run.vm08.stdout:systemctl is present
2026-03-07T10:29:01.591 INFO:teuthology.orchestra.run.vm08.stdout:lvcreate is present
2026-03-07T10:29:01.596 INFO:teuthology.orchestra.run.vm08.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-07T10:29:01.596 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-07T10:29:01.603 INFO:teuthology.orchestra.run.vm08.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-07T10:29:01.603 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stdout inactive
2026-03-07T10:29:01.609 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stdout enabled
2026-03-07T10:29:01.615 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stdout active
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Unit chronyd.service is enabled and running
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Host looks OK
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Cluster fsid: 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Acquiring lock 139691509949200 on /run/cephadm/630831e6-1a10-11f1-b289-9dc3f8f14d3d.lock
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Lock 139691509949200 acquired on /run/cephadm/630831e6-1a10-11f1-b289-9dc3f8f14d3d.lock
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Verifying IP 192.168.123.108 port 3300 ...
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Verifying IP 192.168.123.108 port 6789 ...
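The host validation that just passed ("Host looks OK") can also be re-run on its own; a sketch using the standalone subcommand, assuming the same cephadm binary this job downloaded:

    # repeat the pre-flight checks seen in the log: container engine,
    # systemctl, lvcreate, and time synchronization
    sudo /home/ubuntu/cephtest/cephadm check-host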
2026-03-07T10:29:01.616 INFO:teuthology.orchestra.run.vm08.stdout:Base mon IP(s) is [192.168.123.108:3300, 192.168.123.108:6789], mon addrv is [v2:192.168.123.108:3300,v1:192.168.123.108:6789]
2026-03-07T10:29:01.619 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.108 metric 100
2026-03-07T10:29:01.620 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.108 metric 100
2026-03-07T10:29:01.622 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-03-07T10:29:01.622 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-07T10:29:01.625 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-07T10:29:01.625 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout inet6 ::1/128 scope host
2026-03-07T10:29:01.625 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-07T10:29:01.625 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000
2026-03-07T10:29:01.625 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:8/64 scope link noprefixroute
2026-03-07T10:29:01.625 INFO:teuthology.orchestra.run.vm08.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-07T10:29:01.626 INFO:teuthology.orchestra.run.vm08.stdout:Mon IP `192.168.123.108` is in CIDR network `192.168.123.0/24`
2026-03-07T10:29:01.626 INFO:teuthology.orchestra.run.vm08.stdout:Mon IP `192.168.123.108` is in CIDR network `192.168.123.0/24`
2026-03-07T10:29:01.626 INFO:teuthology.orchestra.run.vm08.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24']
2026-03-07T10:29:01.626 INFO:teuthology.orchestra.run.vm08.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-07T10:29:01.626 INFO:teuthology.orchestra.run.vm08.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:29:02.313 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stdout 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-07T10:29:02.313 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:29:02.313 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stderr Getting image source signatures
2026-03-07T10:29:02.313 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stderr Copying blob sha256:89f108f95c9b33ae21c5514f17c1bd5ca646e21d3c5e8ac1e117cf65bcd40261
2026-03-07T10:29:02.313 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stderr Copying config sha256:8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-07T10:29:02.313 INFO:teuthology.orchestra.run.vm08.stdout:/bin/podman: stderr Writing manifest to image destination
2026-03-07T10:29:02.699 INFO:teuthology.orchestra.run.vm08.stdout:ceph: stdout ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)
2026-03-07T10:29:02.699 INFO:teuthology.orchestra.run.vm08.stdout:Ceph version: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)
2026-03-07T10:29:02.699 INFO:teuthology.orchestra.run.vm08.stdout:Extracting ceph user uid/gid from container image...
2026-03-07T10:29:02.908 INFO:teuthology.orchestra.run.vm08.stdout:stat: stdout 167 167
2026-03-07T10:29:02.908 INFO:teuthology.orchestra.run.vm08.stdout:Creating initial keys...
2026-03-07T10:29:03.146 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph-authtool: stdout AQDv/atp7QJYARAAdVjksiaoHe64eWXzFBm0Mw==
2026-03-07T10:29:03.396 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph-authtool: stdout AQDv/atpympNDxAAFiZu15kbsE+/vbjX3xWsFg==
2026-03-07T10:29:03.642 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph-authtool: stdout AQDv/atpPj3kHhAAt7Y5eaxWKxCJWWL+mfo4SQ==
2026-03-07T10:29:03.643 INFO:teuthology.orchestra.run.vm08.stdout:Creating initial monmap...
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:monmaptool for a [v2:192.168.123.108:3300,v1:192.168.123.108:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:setting min_mon_release = quincy
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/monmaptool: set fsid to 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:03.871 INFO:teuthology.orchestra.run.vm08.stdout:Creating mon...
2026-03-07T10:29:04.126 INFO:teuthology.orchestra.run.vm08.stdout:create mon.a on
2026-03-07T10:29:04.410 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
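The monmaptool output above corresponds to building an epoch-0 map with a single monitor; a sketch of the equivalent manual invocation, assuming the fsid and addresses from this run (/tmp/monmap is a scratch path):

    # create a one-monitor monmap the way bootstrap does
    monmaptool --create --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d \
        --addv a '[v2:192.168.123.108:3300,v1:192.168.123.108:6789]' /tmp/monmap
    # verify fsid, epoch, and the mon.a address vector
    monmaptool --print /tmp/monmap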
2026-03-07T10:29:04.549 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d.target → /etc/systemd/system/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d.target.
2026-03-07T10:29:04.549 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d.target → /etc/systemd/system/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d.target.
2026-03-07T10:29:04.711 INFO:teuthology.orchestra.run.vm08.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a
2026-03-07T10:29:04.711 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Failed to reset failed state of unit ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service: Unit ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service not loaded.
2026-03-07T10:29:04.865 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d.target.wants/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service → /etc/systemd/system/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@.service.
2026-03-07T10:29:05.048 INFO:teuthology.orchestra.run.vm08.stdout:firewalld does not appear to be present
2026-03-07T10:29:05.048 INFO:teuthology.orchestra.run.vm08.stdout:Not possible to enable service . firewalld.service is not available
2026-03-07T10:29:05.048 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for mon to start...
2026-03-07T10:29:05.048 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for mon...
2026-03-07T10:29:06.202 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout cluster:
2026-03-07T10:29:06.202 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout id: 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:06.202 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-03-07T10:29:06.202 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:06.202 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout services:
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum a (age 0.175595s)
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout data:
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout pgs:
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:mon is available
2026-03-07T10:29:06.203 INFO:teuthology.orchestra.run.vm08.stdout:Assimilating anything we can from ceph.conf...
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout [global]
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout fsid = 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.108:3300,v1:192.168.123.108:6789]
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mgr/cephadm/use_agent = True
2026-03-07T10:29:06.616 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-07T10:29:06.617 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:06.617 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout [osd]
2026-03-07T10:29:06.617 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-07T10:29:06.617 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-07T10:29:06.617 INFO:teuthology.orchestra.run.vm08.stdout:Generating new minimal ceph.conf...
2026-03-07T10:29:07.028 INFO:teuthology.orchestra.run.vm08.stdout:Restarting the monitor...
2026-03-07T10:29:07.542 INFO:teuthology.orchestra.run.vm08.stdout:Setting public_network to 192.168.123.0/24 in mon config section
2026-03-07T10:29:08.402 INFO:teuthology.orchestra.run.vm08.stdout:Wrote config to /etc/ceph/ceph.conf
2026-03-07T10:29:08.515 INFO:teuthology.orchestra.run.vm08.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:29:08.515 INFO:teuthology.orchestra.run.vm08.stdout:Creating mgr...
2026-03-07T10:29:08.516 INFO:teuthology.orchestra.run.vm08.stdout:Verifying port 0.0.0.0:9283 ...
2026-03-07T10:29:08.516 INFO:teuthology.orchestra.run.vm08.stdout:Verifying port 0.0.0.0:8765 ...
2026-03-07T10:29:08.883 INFO:teuthology.orchestra.run.vm08.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a
2026-03-07T10:29:08.883 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Failed to reset failed state of unit ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a.service: Unit ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a.service not loaded.
2026-03-07T10:29:09.022 INFO:teuthology.orchestra.run.vm08.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d.target.wants/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a.service → /etc/systemd/system/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@.service.
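The "Assimilating"/"Generating new minimal ceph.conf" steps above are driven by two mon commands that can also be run by hand; a sketch (the input path below is a placeholder):

    # push recognized options from a conf file into the mon config store;
    # options it cannot assimilate are echoed back, as in the dump above
    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    # print the minimal client conf that cephadm then writes out
    ceph config generate-minimal-conf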
2026-03-07T10:29:09.221 INFO:teuthology.orchestra.run.vm08.stdout:firewalld does not appear to be present
2026-03-07T10:29:09.221 INFO:teuthology.orchestra.run.vm08.stdout:Not possible to enable service . firewalld.service is not available
2026-03-07T10:29:09.221 INFO:teuthology.orchestra.run.vm08.stdout:firewalld does not appear to be present
2026-03-07T10:29:09.221 INFO:teuthology.orchestra.run.vm08.stdout:Not possible to open ports <[9283, 8765]>. firewalld.service is not available
2026-03-07T10:29:09.221 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for mgr to start...
2026-03-07T10:29:09.221 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for mgr...
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "fsid": "630831e6-1a10-11f1-b289-9dc3f8f14d3d",
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "health": {
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout 0
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "a"
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "quorum_age": 1,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-07T10:29:09.670 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:29:05:887604+0000",
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "restful"
2026-03-07T10:29:09.671 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:29:05.891952+0000",
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:29:09.672 INFO:teuthology.orchestra.run.vm08.stdout:mgr not available, waiting (1/15)...
2026-03-07T10:29:12.428 INFO:teuthology.orchestra.run.vm08.stdout:mgr not available, waiting (2/15)...
2026-03-07T10:29:14.919 INFO:teuthology.orchestra.run.vm08.stdout:mgr not available, waiting (3/15)...
2026-03-07T10:29:17.444 INFO:teuthology.orchestra.run.vm08.stdout:mgr not available, waiting (4/15)...
2026-03-07T10:29:19.853 INFO:teuthology.orchestra.run.vm08.stdout:mgr not available, waiting (5/15)...
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "fsid": "630831e6-1a10-11f1-b289-9dc3f8f14d3d",
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "health": {
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout 0
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "a"
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "quorum_age": 14,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-07T10:29:22.335 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:29:05:887604+0000",
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "restful"
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:29:22.336 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:29:05.891952+0000",
2026-03-07T10:29:22.337 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:29:22.337 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:29:22.337 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-07T10:29:22.337 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:29:22.337 INFO:teuthology.orchestra.run.vm08.stdout:mgr is available
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout [global]
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout fsid = 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.108:3300,v1:192.168.123.108:6789]
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout [osd]
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-07T10:29:22.827 INFO:teuthology.orchestra.run.vm08.stdout:Enabling cephadm module...
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 5,
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "active_name": "a",
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for the mgr to restart...
2026-03-07T10:29:24.457 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for mgr epoch 5...
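Enabling the orchestrator by hand follows the same two steps bootstrap performs here; a sketch:

    # load the cephadm mgr module (this triggers the mgr respawn awaited above)
    ceph mgr module enable cephadm
    # select it as the orchestrator backend, then confirm
    ceph orch set backend cephadm
    ceph orch status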
2026-03-07T10:29:31.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:30 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:30.577+0000 7f5fc3742100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-07T10:29:32.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:31 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:31.552+0000 7f5fc3742100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-07T10:29:32.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:31 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:31.668+0000 7f5fc3742100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-07T10:29:32.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:31 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:31.788+0000 7f5fc3742100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-07T10:29:32.439 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:32 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:32.034+0000 7f5fc3742100 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-07T10:29:32.439 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:32 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:32.150+0000 7f5fc3742100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-07T10:29:32.439 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:32 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:32.438+0000 7f5fc3742100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-07T10:29:33.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:32 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:32.755+0000 7f5fc3742100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:29:33.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:33 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:33.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:33 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:33.092+0000 7f5fc3742100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-07T10:29:33.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:33 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:33.206+0000 7f5fc3742100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-07T10:29:34.385 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:29:34.385 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7,
2026-03-07T10:29:34.385 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-07T10:29:34.385 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:29:34.385 INFO:teuthology.orchestra.run.vm08.stdout:mgr epoch 5 is available
2026-03-07T10:29:34.385 INFO:teuthology.orchestra.run.vm08.stdout:Setting orchestrator backend to cephadm...
2026-03-07T10:29:34.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:34 vm08 ceph-mon[50288]: Found migration_current of "None". Setting to last migration.
2026-03-07T10:29:34.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:34 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:34.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:34 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:34.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:34 vm08 ceph-mon[50288]: mgrmap e7: a(active, since 1.02443s)
2026-03-07T10:29:35.306 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout value unchanged
2026-03-07T10:29:35.306 INFO:teuthology.orchestra.run.vm08.stdout:Generating ssh key...
2026-03-07T10:29:36.009 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-mon[50288]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-mon[50288]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-mon[50288]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: Generating public/private rsa key pair.
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: Your identification has been saved in /tmp/tmpvnx8xkqw/key
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: Your public key has been saved in /tmp/tmpvnx8xkqw/key.pub
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: The key fingerprint is:
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: SHA256:n2vKKDRSHjRhLh+/VZcEY6s8f88fKPI5krNR2g/1Y90 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: The key's randomart image is:
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: +---[RSA 3072]----+
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | o. +.. |
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | oo . + . |
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | ..o. o o |
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | ooo . o . |
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | o... S . . |
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | . + o ++.. o o|
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | o .. +=+.. =E|
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | . o +=+=o. o|
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: | .. o+=o..o..|
2026-03-07T10:29:36.010 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:35 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: +----[SHA256]-----+
2026-03-07T10:29:36.348 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDfOZ+PdBU75V0uc4h3alJdj8DT3gbrilr7EXn04OjGJXIIbBqyXg1oG2A0+GwStH5zJxPlvTQ12+xUcSjCYXQJcEs5Rn5PmD8t/ivyGnw7Fxk5iz7H511r6AxtuZJoFmMLU6tBS9rhWzZNPveLLQc92/Gn/JOC24YVuqmLYVGbqPvPnGmBBNsLLLZdqm01qGCLzpdE6vcchM2MyfYEb/kXybBU7tEtHWG8egYq+y9Hbx8mHXXm5incrIZ7EPMKHnXbCFU/pXfl9P1cs5f+GXjBPg29B2TpE1vWIrqRGg6wqoTk9CvdQYUMXD4CwGcv/HPIrcRSuLsRGLZ4293lla8t45XkDwO8tly3we3jneGPNXQpxzcle93e6wJLLc8i6rDwQgf43CQJsXdH3oOr/l+70IhPtYp5OfYr3ESX8Di/KtSoi8cV5WSHcSEBX0jUbFAkPmmWwuR73mPBA6dOlDz9+bF0GxQnIwgXEIYGJ5Doyxns6F5Z8JKyFYuQ4RS/hmE= ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:36.348 INFO:teuthology.orchestra.run.vm08.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:29:36.348 INFO:teuthology.orchestra.run.vm08.stdout:Adding key to root@localhost authorized_keys...
2026-03-07T10:29:36.349 INFO:teuthology.orchestra.run.vm08.stdout:Adding host vm08...
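
The "Generating ssh key..." block above is cephadm creating its cluster-wide SSH identity inside the mgr; the harness then pulls the public half with `ceph cephadm get-pub-key` (dispatched just below) and writes it to /home/ubuntu/cephtest/ceph.pub. A sketch of that fetch under the same admin-CLI assumption; the function name is illustrative:

    import pathlib, subprocess

    def fetch_cluster_pub_key(dest="/home/ubuntu/cephtest/ceph.pub"):
        # `ceph cephadm get-pub-key` prints the ssh-rsa line created by
        # `ceph cephadm generate-key`; save it where later steps expect it.
        out = subprocess.run(["ceph", "cephadm", "get-pub-key"],
                             capture_output=True, check=True, text=True).stdout
        pathlib.Path(dest).write_text(out)
        return out.strip()
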
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:34] ENGINE Bus STARTING
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:34] ENGINE Serving on https://192.168.123.108:7150
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:34] ENGINE Client ('192.168.123.108', 49620) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:34] ENGINE Serving on http://192.168.123.108:8765
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:34] ENGINE Bus STARTED
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: Generating ssh key...
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: mgrmap e8: a(active, since 2s)
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:37.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:36 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:38.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:37 vm08 ceph-mon[50288]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:38.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:37 vm08 ceph-mon[50288]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "addr": "192.168.123.108", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:38.735 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:38 vm08 ceph-mon[50288]: Deploying cephadm binary to vm08
2026-03-07T10:29:38.882 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout Added host 'vm08' with addr '192.168.123.108'
2026-03-07T10:29:38.883 INFO:teuthology.orchestra.run.vm08.stdout:Deploying unmanaged mon service...
2026-03-07T10:29:39.371 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-03-07T10:29:39.371 INFO:teuthology.orchestra.run.vm08.stdout:Deploying unmanaged mgr service...
2026-03-07T10:29:39.839 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
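
"Deploying unmanaged mon service..." means bootstrap applies the mon and mgr specs with unmanaged=true, so the cephadm scheduler will not add or move daemons while the harness places them explicitly; control is handed back later with the placement-spec apply (`ceph orch apply mon '2;vm08:...=a;vm09:...=b'` further down). A minimal sketch of the unmanaged apply, same CLI assumption:

    import subprocess

    # Mark mon and mgr unmanaged so the orchestrator leaves placement to the
    # harness (mirrors the "orch apply" dispatches with unmanaged=true above).
    for svc in ("mon", "mgr"):
        subprocess.run(["ceph", "orch", "apply", svc, "--unmanaged"], check=True)
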
2026-03-07T10:29:39.868 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:39 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:39.868 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:39 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:39.868 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:39 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:39.868 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:39 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:40.838 INFO:teuthology.orchestra.run.vm08.stdout:Enabling the dashboard module...
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: Added host vm08
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: Saving service mon spec with placement count:5
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: Saving service mgr spec with placement count:2
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/4219944568' entity='client.admin'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/3635862104' entity='client.admin'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a'
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm08", "caps": []}]: dispatch
2026-03-07T10:29:40.892 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:40 vm08 ceph-mon[50288]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm08", "caps": []}]': finished
2026-03-07T10:29:41.999 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:41 vm08 ceph-mon[50288]: Deploying daemon agent.vm08 on vm08
2026-03-07T10:29:41.999 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:41 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/153428207' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-07T10:29:41.999 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:41 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: ignoring --setuser ceph since I am not root
2026-03-07T10:29:41.999 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:41 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: ignoring --setgroup ceph since I am not root
2026-03-07T10:29:42.272 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:42 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:42.037+0000 7fbcaa985100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-07T10:29:42.273 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:42 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:42.158+0000 7fbcaa985100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "epoch": 9,
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "active_name": "a",
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for the mgr to restart...
2026-03-07T10:29:42.356 INFO:teuthology.orchestra.run.vm08.stdout:Waiting for mgr epoch 9...
2026-03-07T10:29:43.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:42 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/153428207' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-07T10:29:43.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:42 vm08 ceph-mon[50288]: mgrmap e9: a(active, since 8s)
2026-03-07T10:29:43.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:42 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/148594082' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-07T10:29:43.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:43 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:43.206+0000 7fbcaa985100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-07T10:29:44.272 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:43 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:43.992+0000 7fbcaa985100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-07T10:29:44.272 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:44 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:44.105+0000 7fbcaa985100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-07T10:29:44.772 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:44 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:44.330+0000 7fbcaa985100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-07T10:29:46.272 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:46 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:46.004+0000 7fbcaa985100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-07T10:29:46.683 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:46 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:46.317+0000 7fbcaa985100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:29:46.683 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:46 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:46.443+0000 7fbcaa985100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-07T10:29:46.683 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:46 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:46.554+0000 7fbcaa985100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-07T10:29:47.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:46 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:46.681+0000 7fbcaa985100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-07T10:29:47.022 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:46 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:46.794+0000 7fbcaa985100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-07T10:29:47.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:47 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:47.265+0000 7fbcaa985100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-07T10:29:47.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:47 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:47.406+0000 7fbcaa985100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-07T10:29:48.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:48 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:48.069+0000 7fbcaa985100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-07T10:29:49.507 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:49 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:49.033+0000 7fbcaa985100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-07T10:29:49.507 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:49 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:49.149+0000 7fbcaa985100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-07T10:29:49.507 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:49 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:49.266+0000 7fbcaa985100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-07T10:29:49.772 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:49 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:49.506+0000 7fbcaa985100 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-07T10:29:49.772 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:49 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:49.618+0000 7fbcaa985100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-07T10:29:50.209 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:49 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:49.896+0000 7fbcaa985100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-07T10:29:50.522 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:50.207+0000 7fbcaa985100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: Active manager daemon a restarted
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: Activating manager daemon a
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: osdmap e3: 0 total, 0 up, 0 in
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: mgrmap e10: a(active, starting, since 0.00764985s)
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: Manager daemon a is now available
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:50.540+0000 7fbcaa985100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-07T10:29:50.816 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:29:50 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:29:50.652+0000 7fbcaa985100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-07T10:29:51.825 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:29:51.825 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11,
2026-03-07T10:29:51.825 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-07T10:29:51.825 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:29:51.825 INFO:teuthology.orchestra.run.vm08.stdout:mgr epoch 9 is available
2026-03-07T10:29:51.825 INFO:teuthology.orchestra.run.vm08.stdout:Generating a dashboard self-signed certificate...
2026-03-07T10:29:52.338 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout Self-signed certificate created
2026-03-07T10:29:52.338 INFO:teuthology.orchestra.run.vm08.stdout:Creating initial admin user...
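
After the dashboard-triggered mgr restart settles ("mgr epoch 9 is available"), the harness generates a self-signed certificate and creates the initial admin account, whose bcrypt-hashed record appears in the next block. A sketch of the user-creation step; recent Ceph releases take the password via `-i <file>` rather than on argv, and the exact flag spelling should be confirmed against `ceph dashboard ac-user-create -h` for the release in use:

    import subprocess, tempfile

    def create_dashboard_admin(username="admin", password="w4oh6ez6o1"):
        # Pass the password through a file so it never shows in the process list.
        with tempfile.NamedTemporaryFile("w") as pwfile:
            pwfile.write(password)
            pwfile.flush()
            subprocess.run(["ceph", "dashboard", "ac-user-create",
                            "--force-password", "--pwd-update-required",
                            username, "administrator", "-i", pwfile.name],
                           check=True)
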
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm08", "caps": []}]: dispatch
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:51] ENGINE Bus STARTING
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:51] ENGINE Serving on http://192.168.123.108:8765
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:51] ENGINE Serving on https://192.168.123.108:7150
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:51] ENGINE Bus STARTED
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: [07/Mar/2026:10:29:51] ENGINE Client ('192.168.123.108', 60332) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: mgrmap e11: a(active, since 1.01066s)
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: Deploying daemon agent.vm08 on vm08
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:52.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:52 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:52.928 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$XXObG9eu87iGea.hZcnJze4kuMWIJgAltacczmNjrOPqiPHLn/LV6", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1772879392, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-07T10:29:52.929 INFO:teuthology.orchestra.run.vm08.stdout:Fetching dashboard port number...
2026-03-07T10:29:53.379 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stdout 8443
2026-03-07T10:29:53.379 INFO:teuthology.orchestra.run.vm08.stdout:firewalld does not appear to be present
2026-03-07T10:29:53.379 INFO:teuthology.orchestra.run.vm08.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout:Ceph Dashboard is now available at:
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout: URL: https://vm08.local:8443/
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout: User: admin
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout: Password: w4oh6ez6o1
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.380 INFO:teuthology.orchestra.run.vm08.stdout:Saving cluster configuration to /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config directory
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:Or, if you are only running a single cluster on this host:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout: sudo /home/ubuntu/cephtest/cephadm shell
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:Please consider enabling telemetry to help improve Ceph:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout: ceph telemetry on
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:For more information see:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:53.866 INFO:teuthology.orchestra.run.vm08.stdout:Bootstrap complete.
2026-03-07T10:29:53.899 INFO:tasks.cephadm:Fetching config...
2026-03-07T10:29:53.899 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:29:53.899 DEBUG:teuthology.orchestra.run.vm08:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-07T10:29:53.914 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-07T10:29:53.914 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:29:53.914 DEBUG:teuthology.orchestra.run.vm08:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-07T10:29:53.970 INFO:tasks.cephadm:Fetching mon keyring...
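
The "Fetching config..." / `dd if=... of=/dev/stdout` pairs show how teuthology retrieves files from test nodes: it runs dd on the remote host over its SSH connection and captures stdout. A standalone equivalent, assuming plain `ssh` access to the node; the hostnames and helper name are illustrative, not teuthology's actual API:

    import subprocess

    def fetch_remote_file(host, path, sudo=False):
        # Stream the file back over ssh the same way the harness does;
        # returns bytes so keyrings survive untouched.
        cmd = ("sudo " if sudo else "") + f"dd if={path} of=/dev/stdout"
        return subprocess.run(["ssh", host, cmd],
                              capture_output=True, check=True).stdout

    conf = fetch_remote_file("vm08.local", "/etc/ceph/ceph.conf")
    keyring = fetch_remote_file("vm08.local",
                                "/etc/ceph/ceph.client.admin.keyring")
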
2026-03-07T10:29:53.970 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:29:53.970 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/keyring of=/dev/stdout
2026-03-07T10:29:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:53 vm08 ceph-mon[50288]: from='client.14168 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:53 vm08 ceph-mon[50288]: from='client.14170 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:53 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:53 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/1881808815' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-07T10:29:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:53 vm08 ceph-mon[50288]: mgrmap e12: a(active, since 2s)
2026-03-07T10:29:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:53 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/2901441413' entity='client.admin'
2026-03-07T10:29:54.041 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-07T10:29:54.041 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:29:54.041 DEBUG:teuthology.orchestra.run.vm08:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-07T10:29:54.105 INFO:tasks.cephadm:Installing pub ssh key for root users...
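
"Installing pub ssh key for root users..." is carried out by the exact shell visible in the next block: create /root/.ssh with mode 0700, append the key to authorized_keys, and tighten it to 0600. The same sequence as a local Python sketch (helper name illustrative):

    import pathlib

    def install_authorized_key(pub_key, ssh_dir="/root/.ssh"):
        d = pathlib.Path(ssh_dir)
        d.mkdir(mode=0o700, exist_ok=True)       # like `install -d -m 0700`
        ak = d / "authorized_keys"
        with ak.open("a") as f:                  # like `tee -a`
            f.write(pub_key.rstrip("\n") + "\n")
        ak.chmod(0o600)                          # like `chmod 0600`
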
2026-03-07T10:29:54.105 DEBUG:teuthology.orchestra.run.vm08:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDfOZ+PdBU75V0uc4h3alJdj8DT3gbrilr7EXn04OjGJXIIbBqyXg1oG2A0+GwStH5zJxPlvTQ12+xUcSjCYXQJcEs5Rn5PmD8t/ivyGnw7Fxk5iz7H511r6AxtuZJoFmMLU6tBS9rhWzZNPveLLQc92/Gn/JOC24YVuqmLYVGbqPvPnGmBBNsLLLZdqm01qGCLzpdE6vcchM2MyfYEb/kXybBU7tEtHWG8egYq+y9Hbx8mHXXm5incrIZ7EPMKHnXbCFU/pXfl9P1cs5f+GXjBPg29B2TpE1vWIrqRGg6wqoTk9CvdQYUMXD4CwGcv/HPIrcRSuLsRGLZ4293lla8t45XkDwO8tly3we3jneGPNXQpxzcle93e6wJLLc8i6rDwQgf43CQJsXdH3oOr/l+70IhPtYp5OfYr3ESX8Di/KtSoi8cV5WSHcSEBX0jUbFAkPmmWwuR73mPBA6dOlDz9+bF0GxQnIwgXEIYGJ5Doyxns6F5Z8JKyFYuQ4RS/hmE= ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-07T10:29:54.222 INFO:teuthology.orchestra.run.vm08.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDfOZ+PdBU75V0uc4h3alJdj8DT3gbrilr7EXn04OjGJXIIbBqyXg1oG2A0+GwStH5zJxPlvTQ12+xUcSjCYXQJcEs5Rn5PmD8t/ivyGnw7Fxk5iz7H511r6AxtuZJoFmMLU6tBS9rhWzZNPveLLQc92/Gn/JOC24YVuqmLYVGbqPvPnGmBBNsLLLZdqm01qGCLzpdE6vcchM2MyfYEb/kXybBU7tEtHWG8egYq+y9Hbx8mHXXm5incrIZ7EPMKHnXbCFU/pXfl9P1cs5f+GXjBPg29B2TpE1vWIrqRGg6wqoTk9CvdQYUMXD4CwGcv/HPIrcRSuLsRGLZ4293lla8t45XkDwO8tly3we3jneGPNXQpxzcle93e6wJLLc8i6rDwQgf43CQJsXdH3oOr/l+70IhPtYp5OfYr3ESX8Di/KtSoi8cV5WSHcSEBX0jUbFAkPmmWwuR73mPBA6dOlDz9+bF0GxQnIwgXEIYGJ5Doyxns6F5Z8JKyFYuQ4RS/hmE= ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:54.235 DEBUG:teuthology.orchestra.run.vm09:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDfOZ+PdBU75V0uc4h3alJdj8DT3gbrilr7EXn04OjGJXIIbBqyXg1oG2A0+GwStH5zJxPlvTQ12+xUcSjCYXQJcEs5Rn5PmD8t/ivyGnw7Fxk5iz7H511r6AxtuZJoFmMLU6tBS9rhWzZNPveLLQc92/Gn/JOC24YVuqmLYVGbqPvPnGmBBNsLLLZdqm01qGCLzpdE6vcchM2MyfYEb/kXybBU7tEtHWG8egYq+y9Hbx8mHXXm5incrIZ7EPMKHnXbCFU/pXfl9P1cs5f+GXjBPg29B2TpE1vWIrqRGg6wqoTk9CvdQYUMXD4CwGcv/HPIrcRSuLsRGLZ4293lla8t45XkDwO8tly3we3jneGPNXQpxzcle93e6wJLLc8i6rDwQgf43CQJsXdH3oOr/l+70IhPtYp5OfYr3ESX8Di/KtSoi8cV5WSHcSEBX0jUbFAkPmmWwuR73mPBA6dOlDz9+bF0GxQnIwgXEIYGJ5Doyxns6F5Z8JKyFYuQ4RS/hmE= ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-07T10:29:54.266 INFO:teuthology.orchestra.run.vm09.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDfOZ+PdBU75V0uc4h3alJdj8DT3gbrilr7EXn04OjGJXIIbBqyXg1oG2A0+GwStH5zJxPlvTQ12+xUcSjCYXQJcEs5Rn5PmD8t/ivyGnw7Fxk5iz7H511r6AxtuZJoFmMLU6tBS9rhWzZNPveLLQc92/Gn/JOC24YVuqmLYVGbqPvPnGmBBNsLLLZdqm01qGCLzpdE6vcchM2MyfYEb/kXybBU7tEtHWG8egYq+y9Hbx8mHXXm5incrIZ7EPMKHnXbCFU/pXfl9P1cs5f+GXjBPg29B2TpE1vWIrqRGg6wqoTk9CvdQYUMXD4CwGcv/HPIrcRSuLsRGLZ4293lla8t45XkDwO8tly3we3jneGPNXQpxzcle93e6wJLLc8i6rDwQgf43CQJsXdH3oOr/l+70IhPtYp5OfYr3ESX8Di/KtSoi8cV5WSHcSEBX0jUbFAkPmmWwuR73mPBA6dOlDz9+bF0GxQnIwgXEIYGJ5Doyxns6F5Z8JKyFYuQ4RS/hmE= ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:29:54.275 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-03-07T10:29:54.556 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:29:55.146 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-03-07T10:29:55.146 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-03-07T10:29:55.439 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:29:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/3155249825' entity='client.admin'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:55.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:55 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:56.103 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm09
2026-03-07T10:29:56.103 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-07T10:29:56.103 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.conf
2026-03-07T10:29:56.117 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-07T10:29:56.117 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:29:56.172 INFO:tasks.cephadm:Adding host vm09 to orchestrator...
2026-03-07T10:29:56.172 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch host add vm09
2026-03-07T10:29:56.427 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:56.768 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:56 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: Updating vm08:/etc/ceph/ceph.conf
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: Updating vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: Updating vm08:/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: Updating vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.client.admin.keyring
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm09", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:29:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:57 vm08 ceph-mon[50288]: mgrmap e13: a(active, since 6s)
2026-03-07T10:29:58.949 INFO:teuthology.orchestra.run.vm08.stdout:Added host 'vm09' with addr '192.168.123.109'
2026-03-07T10:29:59.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:58 vm08 ceph-mon[50288]: Deploying cephadm binary to vm09
2026-03-07T10:29:59.101 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch host ls --format=json
2026-03-07T10:29:59.256 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:29:59.582 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:29:59.582 INFO:teuthology.orchestra.run.vm08.stdout:[{"addr": "192.168.123.108", "hostname": "vm08", "labels": [], "status": ""}, {"addr": "192.168.123.109", "hostname": "vm09", "labels": [], "status": ""}]
2026-03-07T10:29:59.754 INFO:tasks.cephadm:Setting crush tunables to default
2026-03-07T10:29:59.754 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd crush tunables default
2026-03-07T10:29:59.914 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: Added host vm09
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
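
With both hosts added, the harness checks membership by parsing `ceph orch host ls --format=json`, whose two-element output appears above. A sketch of that verification, same admin-CLI assumption (the helper name is illustrative):

    import json, subprocess

    def orch_hosts():
        out = subprocess.run(["ceph", "orch", "host", "ls", "--format=json"],
                             capture_output=True, check=True, text=True).stdout
        # e.g. [{"addr": "192.168.123.108", "hostname": "vm08", ...}, ...]
        return {h["hostname"]: h["addr"] for h in json.loads(out)}

    assert set(orch_hosts()) == {"vm08", "vm09"}
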
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: Updating vm09:/etc/ceph/ceph.conf
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: Updating vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: Updating vm09:/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: Updating vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.client.admin.keyring
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm09", "caps": []}]: dispatch
2026-03-07T10:30:00.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:29:59 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm09", "caps": []}]': finished
2026-03-07T10:30:00.951 INFO:teuthology.orchestra.run.vm08.stderr:adjusted tunables profile to default
2026-03-07T10:30:01.114 INFO:tasks.cephadm:Adding mon.a on vm08
2026-03-07T10:30:01.114 INFO:tasks.cephadm:Adding mon.b on vm09
2026-03-07T10:30:01.114 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch apply mon '2;vm08:192.168.123.108=a;vm09:192.168.123.109=b'
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/4245337059' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: Deploying daemon agent.vm09 on vm09
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:01.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:00 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:01.301 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:01.672 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mon update...
2026-03-07T10:30:01.843 DEBUG:teuthology.orchestra.run.vm09:mon.b> sudo journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.b.service
2026-03-07T10:30:01.844 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-07T10:30:01.844 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph mon dump -f json
2026-03-07T10:30:02.041 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/4245337059' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: osdmap e4: 0 total, 0 up, 0 in
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm08:192.168.123.108=a;vm09:192.168.123.109=b", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: Saving service mon spec with placement vm08:192.168.123.108=a;vm09:192.168.123.109=b;count:2
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:02.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:01 vm08 ceph-mon[50288]: Deploying daemon mon.b on vm09
2026-03-07T10:30:02.435 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:30:02.435 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"630831e6-1a10-11f1-b289-9dc3f8f14d3d","modified":"2026-03-07T10:29:03.741746Z","created":"2026-03-07T10:29:03.741746Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-07T10:30:02.435 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-03-07T10:30:03.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:02 vm08 ceph-mon[50288]: from='client.? 192.168.123.109:0/1748441098' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-07T10:30:03.611 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-07T10:30:03.611 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph mon dump -f json
2026-03-07T10:30:03.860 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.b/config
2026-03-07T10:30:04.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:03 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:04.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:03 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:04.367 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:30:04.367 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"630831e6-1a10-11f1-b289-9dc3f8f14d3d","modified":"2026-03-07T10:29:03.741746Z","created":"2026-03-07T10:29:03.741746Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-07T10:30:04.368 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-03-07T10:30:04.386 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 podman[50690]: 2026-03-07 10:30:04.374885396 +0000 UTC m=+0.059192877 container init d42739b1670cd377ef4324fb0fbfa9a9607737dc989bd88f7593ed4c6656c824 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 podman[50690]: 2026-03-07 10:30:04.38848565 +0000 UTC m=+0.072793122 container start d42739b1670cd377ef4324fb0fbfa9a9607737dc989bd88f7593ed4c6656c824 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 bash[50690]: d42739b1670cd377ef4324fb0fbfa9a9607737dc989bd88f7593ed4c6656c824
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 podman[50690]: 2026-03-07 10:30:04.330437075 +0000 UTC m=+0.014744547 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 systemd[1]: Started Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: set uid:gid to 167:167 (ceph:ceph)
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 6
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: pidfile_write: ignore empty --pid-file
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: load: jerasure load: lrc
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: RocksDB version: 7.9.2
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Git sha 0
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Compile date 2026-03-06 13:52:12
2026-03-07T10:30:04.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: DB SUMMARY
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: DB Session ID: POCBOFAQ7D1DXACYEBQM
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: CURRENT file: CURRENT
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: IDENTITY file: IDENTITY
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 0, files:
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Write Ahead Log file in
/var/lib/ceph/mon/ceph-b/store.db: 000004.log size: 511 ; 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.error_if_exists: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.create_if_missing: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.paranoid_checks: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.env: 0x560ab901eca0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.fs: PosixFileSystem 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.info_log: 0x560abb142320 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_file_opening_threads: 16 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.statistics: (nil) 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.use_fsync: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_log_file_size: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.keep_log_file_num: 1000 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.recycle_log_file_num: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.allow_fallocate: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.allow_mmap_reads: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.allow_mmap_writes: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.use_direct_reads: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.create_missing_column_families: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.db_log_dir: 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 
vm09 ceph-mon[50738]: rocksdb: Options.wal_dir: 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.advise_random_on_open: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.db_write_buffer_size: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.write_buffer_manager: 0x560abb147900 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.rate_limiter: (nil) 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.wal_recovery_mode: 2 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.enable_thread_tracking: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.enable_pipelined_write: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.unordered_write: 0 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.row_cache: None 2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.wal_filter: None 
2026-03-07T10:30:04.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.allow_ingest_behind: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.two_write_queues: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.manual_wal_flush: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.wal_compression: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.atomic_flush: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.log_readahead_size: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.best_efforts_recovery: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.allow_data_in_errors: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.db_host_id: __hostname__ 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_background_jobs: 2 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_background_compactions: -1 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_subcompactions: 1 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_total_wal_size: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 
10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_open_files: -1 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bytes_per_sync: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_readahead_size: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_background_flushes: -1 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Compression algorithms supported: 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kZSTD supported: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kXpressCompression supported: 0 2026-03-07T10:30:04.843 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kBZip2Compression supported: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kLZ4Compression supported: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kZlibCompression supported: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kLZ4HCCompression supported: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: kSnappyCompression supported: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000005 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-07T10:30:04.844 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.merge_operator: 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_filter: None 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_filter_factory: None 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.sst_partitioner_factory: None 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x560abb1423e0) 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_top_level_index_and_filter: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: index_type: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_index_type: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: index_shortening: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: checksum: 4 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: no_block_cache: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache: 0x560abb1671f0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_name: BinnedLRUCache 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_options: 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: capacity : 536870912 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: num_shard_bits : 4 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: strict_capacity_limit : 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: high_pri_pool_ratio: 0.000 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_compressed: (nil) 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: persistent_cache: (nil) 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size: 4096 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size_deviation: 10 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_restart_interval: 16 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: index_block_restart_interval: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: metadata_block_size: 4096 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: partition_filters: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: use_delta_encoding: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: filter_policy: bloomfilter 2026-03-07T10:30:04.844 
INFO:journalctl@ceph.mon.b.vm09.stdout: whole_key_filtering: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: verify_compression: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: read_amp_bytes_per_bit: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: format_version: 5 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_index_compression: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: block_align: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: max_auto_readahead_size: 262144 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: prepopulate_block_cache: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: initial_auto_readahead_size: 8192 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout: num_file_reads_for_auto_readahead: 2 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.write_buffer_size: 33554432 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_write_buffer_number: 2 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression: NoCompression 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression: Disabled 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.prefix_extractor: nullptr 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.num_levels: 7 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-07T10:30:04.844 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-07T10:30:04.845 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.level: 32767 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.strategy: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.enabled: false 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.target_file_size_base: 67108864 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: 
Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.arena_block_size: 1048576 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.disable_auto_compactions: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 
Deletion ratio = 0); 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.inplace_update_support: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.bloom_locality: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.max_successive_merges: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.paranoid_file_checks: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.force_consistency_checks: 1 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.report_bg_io_stats: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.ttl: 2592000 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.enable_blob_files: false 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.min_blob_size: 0 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.blob_file_size: 268435456 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-07T10:30:04.845 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: 
Options.blob_file_starting_level: 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 38d41774-76de-4e79-ad06-f109524b8d98 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879404416894, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879404417834, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772879404, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "38d41774-76de-4e79-ad06-f109524b8d98", "db_session_id": "POCBOFAQ7D1DXACYEBQM", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}} 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879404417887, "job": 1, "event": "recovery_finished"} 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/version_set.cc:5047] Creating manifest 10 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 
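The rocksdb "EVENT_LOG_v1" records in the recovery sequence above ("recovery_started", "table_file_creation", "recovery_finished") are single-line JSON payloads embedded in the journal stream. A minimal sketch of pulling them out of a saved copy of this log for inspection, assuming only what is visible above; rocksdb_events and the teuthology.log path are illustrative placeholders, not teuthology or RocksDB APIs:

    import json
    import re

    decoder = json.JSONDecoder()

    def rocksdb_events(text):
        # Each "EVENT_LOG_v1 " marker is followed by exactly one JSON object;
        # raw_decode copes with the nested braces in table_file_creation.
        for marker in re.finditer(r"EVENT_LOG_v1 ", text):
            event, _ = decoder.raw_decode(text, marker.end())
            yield event

    with open("teuthology.log") as f:  # placeholder path to a saved copy of this log
        for event in rocksdb_events(f.read()):
            print(event["event"], event.get("time_micros"))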
2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x560abb168e00 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: DB pointer 0x560abb280000 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: ** DB Stats ** 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] ** 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 1.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 1.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 1.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] ** 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 1.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative compaction: 0.00 GB write, 0.20 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval compaction: 0.00 GB write, 0.20 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache BinnedLRUCache@0x560abb1671f0#6 capacity: 512.00 MB usage: 0.22 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 9e-06 secs_since: 0 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%) 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b does not exist in monmap, will attempt to join an existing cluster 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: using public_addr v2:192.168.123.109:0/0 -> [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: starting mon.b rank -1 at public addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] at bind addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(???) 
e0 preinit fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).mds e1 new map 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).mds e1 print_map 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: e1 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: btime 2026-03-07T10:29:05:887604+0000 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: legacy client fscid: -1 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout: No filesystems configured 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-07T10:30:04.846 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e4 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mkfs 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: monmap epoch 1 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: last_changed 2026-03-07T10:29:03.741746+0000 2026-03-07T10:30:04.847 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: created 2026-03-07T10:29:03.741746+0000 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: min_mon_release 19 (squid) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: election_strategy: 1 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: fsmap 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: osdmap e1: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e1: no daemons active 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/2897291996' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/869805889' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/869805889' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/487409536' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: monmap epoch 1 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: last_changed 2026-03-07T10:29:03.741746+0000 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: created 2026-03-07T10:29:03.741746+0000 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: min_mon_release 19 (squid) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: election_strategy: 1 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: fsmap 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: osdmap e1: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e1: no daemons active 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/913726070' entity='client.admin' 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/3584225960' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/356207855' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/1402257923' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/1755919677' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Activating manager daemon a 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e2: a(active, starting, since 0.00440779s) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Manager daemon a is now available 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14100 192.168.123.108:0/317259040' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/372910494' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e3: a(active, since 1.00872s) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e4: a(active, since 2s) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4003536494' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/3969686415' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/3969686415' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4219122155' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4219122155' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e5: a(active, since 4s) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/314875287' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Active manager daemon a restarted 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Activating manager daemon a 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: osdmap e2: 0 total, 0 up, 0 in 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e6: a(active, starting, since 0.0217555s) 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:30:04.847 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Manager daemon a is now available 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Found migration_current of "None". Setting to last migration. 
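The mgr restart captured above is the expected effect of the earlier "mgr module enable cephadm" dispatch: enabling a module respawns the active mgr, which then replays its metadata queries and runs the cephadm migration check ("Found migration_current of None. Setting to last migration."). A minimal sketch of the equivalent manual sequence, assuming a ceph CLI authenticated as client.admin; the final status call is added here only as a sanity check:

    # enable the cephadm orchestrator module; this restarts the active mgr
    ceph mgr module enable cephadm
    # route "ceph orch ..." calls to the cephadm backend (the "orch set
    # backend" dispatch visible below)
    ceph orch set backend cephadm
    # sanity check -- should report the cephadm backend as available
    ceph orch status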
2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e7: a(active, since 1.02443s) 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:34] ENGINE Bus STARTING 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:34] ENGINE Serving on https://192.168.123.108:7150 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:34] ENGINE Client ('192.168.123.108', 49620) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:34] ENGINE Serving on http://192.168.123.108:8765 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:34] ENGINE Bus STARTED 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Generating ssh key... 
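The "Generating ssh key..." message marks cephadm building the SSH identity it uses to manage hosts; the surrounding set-user / generate-key / get-pub-key / orch host add dispatches are the standard bootstrap order. A minimal sketch of that sequence, assuming root as the cephadm SSH user as configured in this run; the ssh-copy-id step is the usual way to install the key on a target and is not itself visible in the log:

    ceph cephadm set-user root            # SSH as root to managed hosts
    ceph cephadm generate-key             # create cephadm's SSH keypair
    ceph cephadm get-pub-key > ceph.pub   # export the public half
    ssh-copy-id -f -i ceph.pub root@vm08  # install it on the target (assumed step)
    ceph orch host add vm08 192.168.123.108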
2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e8: a(active, since 2s) 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "addr": "192.168.123.108", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Deploying cephadm binary to vm08 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Added host vm08 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Saving service mon spec with placement count:5 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Saving service mgr spec with placement count:2 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4219944568' entity='client.admin' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/3635862104' entity='client.admin' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm08", "caps": []}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14124 192.168.123.108:0/3706102624' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm08", "caps": []}]': finished 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Deploying daemon agent.vm08 on vm08 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/153428207' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/153428207' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e9: a(active, since 8s) 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/148594082' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Active manager daemon a restarted 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Activating manager daemon a 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: osdmap e3: 0 total, 0 up, 0 in 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e10: a(active, starting, since 0.00764985s) 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Manager daemon a is now available 2026-03-07T10:30:04.848 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm08", 
"caps": []}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:51] ENGINE Bus STARTING 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:51] ENGINE Serving on http://192.168.123.108:8765 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:51] ENGINE Serving on https://192.168.123.108:7150 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:51] ENGINE Bus STARTED 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: [07/Mar/2026:10:29:51] ENGINE Client ('192.168.123.108', 60332) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e11: a(active, since 1.01066s) 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Deploying daemon agent.vm08 on vm08 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14168 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14170 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/1881808815' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e12: a(active, since 2s) 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/2901441413' entity='client.admin' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/3155249825' entity='client.admin' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm08:/etc/ceph/ceph.conf 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.client.admin.keyring 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm09", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mgrmap e13: a(active, since 6s) 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Deploying cephadm binary to vm09 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Added host vm09 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:04.849 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm09:/etc/ceph/ceph.conf 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Updating vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.client.admin.keyring 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm09", "caps": []}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 
vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm09", "caps": []}]': finished 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4245337059' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Deploying daemon agent.vm09 on vm09 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/4245337059' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm08:192.168.123.108=a;vm09:192.168.123.109=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Saving service mon spec with placement vm08:192.168.123.108=a;vm09:192.168.123.109=b;count:2 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: Deploying daemon mon.b on vm09 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='client.? 
192.168.123.109:0/1748441098' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:30:04.850 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:04 vm09 ceph-mon[50738]: mon.b@-1(synchronizing).paxosservice(auth 1..5) refresh upgraded, format 0 -> 3 2026-03-07T10:30:05.518 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-07T10:30:05.518 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph mon dump -f json 2026-03-07T10:30:05.740 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.b/config 2026-03-07T10:30:09.716 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-07T10:30:09.716 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":2,"fsid":"630831e6-1a10-11f1-b289-9dc3f8f14d3d","modified":"2026-03-07T10:30:04.449183Z","created":"2026-03-07T10:29:03.741746Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:3300","nonce":0},{"type":"v1","addr":"192.168.123.109:6789","nonce":0}]},"addr":"192.168.123.109:6789/0","public_addr":"192.168.123.109:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-07T10:30:09.716 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 2 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: mon.a calling monitor election 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon 
metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: mon.b calling monitor election 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: monmap epoch 2 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: last_changed 2026-03-07T10:30:04.449183+0000 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: created 2026-03-07T10:29:03.741746+0000 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: min_mon_release 19 (squid) 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: election_strategy: 1 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: fsmap 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:30:09.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: mgrmap e13: a(active, since 18s) 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: overall HEALTH_OK 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' 
entity='mgr.a' 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:09 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: mon.a calling monitor election 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: mon.b calling monitor election 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: monmap epoch 2 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: last_changed 
2026-03-07T10:30:04.449183+0000 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: created 2026-03-07T10:29:03.741746+0000 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: min_mon_release 19 (squid) 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: election_strategy: 1 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: fsmap 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: mgrmap e13: a(active, since 18s) 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: overall HEALTH_OK 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:09 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:09.876 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-07T10:30:09.876 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph config generate-minimal-conf 2026-03-07T10:30:10.167 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:10.490 INFO:teuthology.orchestra.run.vm08.stdout:# minimal ceph.conf for 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:10.490 INFO:teuthology.orchestra.run.vm08.stdout:[global] 2026-03-07T10:30:10.490 INFO:teuthology.orchestra.run.vm08.stdout: fsid = 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:30:10.490 INFO:teuthology.orchestra.run.vm08.stdout: mon_host = [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-03-07T10:30:10.655 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 
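The "Waiting for 2 mons in monmap..." step above is teuthology polling "ceph mon dump -f json" through the cephadm shell until mon.b reaches quorum; the dumped monmap confirms both ranks with "quorum":[0,1]. A minimal bash sketch of that poll, reusing the cephadm path and fsid from this run but with a simplified shell invocation (no --image/-c/-k flags); counting "rank" keys is a crude stand-in for real JSON parsing, since one appears per monitor in the dump. The config distribution announced above then proceeds via dd in the lines that follow.

    FSID=630831e6-1a10-11f1-b289-9dc3f8f14d3d
    # one "rank" key appears per monitor in the mon dump JSON
    until [ "$(sudo /home/ubuntu/cephtest/cephadm shell --fsid $FSID -- \
          ceph mon dump -f json 2>/dev/null | grep -o '"rank"' | wc -l)" -ge 2 ]; do
      sleep 5
    done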
2026-03-07T10:30:10.655 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:30:10.655 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.conf
2026-03-07T10:30:10.680 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:30:10.681 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: Updating vm08:/etc/ceph/ceph.conf
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: Updating vm09:/etc/ceph/ceph.conf
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: Updating vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: Updating vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='client.? 192.168.123.109:0/1172149110' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: Reconfiguring mon.a (unknown last config time)...
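The two dd invocations above are how teuthology lands the final files on each node: the file content is streamed over the SSH channel into "sudo dd of=<path>", so no temporary file is needed on the remote. A minimal sketch of the same distribution, assuming the minimal conf was captured from "ceph config generate-minimal-conf" as shown earlier and that the admin keyring is present locally under /etc/ceph:

    ceph config generate-minimal-conf > /tmp/minimal.conf
    for host in vm08 vm09; do
      # stream each file into dd on the remote, mirroring teuthology's pattern
      ssh "$host" sudo dd of=/etc/ceph/ceph.conf < /tmp/minimal.conf
      ssh "$host" sudo dd of=/etc/ceph/ceph.client.admin.keyring \
          < /etc/ceph/ceph.client.admin.keyring
    done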
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: Reconfiguring daemon mon.a on vm08
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/941687532' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.741 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.742 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.742 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:10.742 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.742 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:10.742 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:10 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.742 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-07T10:30:10.742 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.conf
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: Updating vm08:/etc/ceph/ceph.conf
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: Updating vm09:/etc/ceph/ceph.conf
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: Updating vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: Updating vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/config/ceph.conf
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='client.? 192.168.123.109:0/1172149110' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: Reconfiguring mon.a (unknown last config time)...
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: Reconfiguring daemon mon.a on vm08
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/941687532' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:10.762 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:10 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:10.766 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-07T10:30:10.766 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:30:10.827 INFO:tasks.cephadm:Adding mgr.a on vm08
2026-03-07T10:30:10.827 INFO:tasks.cephadm:Adding mgr.b on vm09
2026-03-07T10:30:10.827 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch apply mgr '2;vm08=a;vm09=b'
2026-03-07T10:30:11.008 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.b/config
2026-03-07T10:30:11.309 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mgr update...
2026-03-07T10:30:11.463 DEBUG:teuthology.orchestra.run.vm09:mgr.b> sudo journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.b.service
2026-03-07T10:30:11.464 INFO:tasks.cephadm:Deploying OSDs...
2026-03-07T10:30:11.465 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:30:11.465 DEBUG:teuthology.orchestra.run.vm08:> dd if=/scratch_devs of=/dev/stdout
2026-03-07T10:30:11.478 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:30:11.479 DEBUG:teuthology.orchestra.run.vm08:> ls /dev/[sv]d?
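[editor's note] The `ceph orch apply mgr '2;vm08=a;vm09=b'` call above uses cephadm's compact placement string: a daemon count followed by host=name pairs. As a sketch, the same placement expressed as a YAML service spec (assuming cephadm's documented spec format; the one-liner is what the test actually runs):

    # Hypothetical spec file equivalent to the placement string above:
    cat > mgr-spec.yaml <<'EOF'
    service_type: mgr
    placement:
      count: 2
      hosts:
        - vm08=a
        - vm09=b
    EOF
    # Applied from inside a cephadm shell:
    ceph orch apply -i mgr-spec.yaml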
2026-03-07T10:30:11.534 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vda
2026-03-07T10:30:11.534 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdb
2026-03-07T10:30:11.534 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdc
2026-03-07T10:30:11.534 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdd
2026-03-07T10:30:11.534 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vde
2026-03-07T10:30:11.534 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-07T10:30:11.534 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-07T10:30:11.534 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdb
2026-03-07T10:30:11.590 INFO:teuthology.orchestra.run.vm08.stdout:  File: /dev/vdb
2026-03-07T10:30:11.590 INFO:teuthology.orchestra.run.vm08.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:29:56.478979628 +0000
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:28:29.327627631 +0000
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:28:29.327627631 +0000
2026-03-07T10:30:11.591 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:25:09.239000000 +0000
2026-03-07T10:30:11.591 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-07T10:30:11.652 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in
2026-03-07T10:30:11.652 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out
2026-03-07T10:30:11.652 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000131206 s, 3.9 MB/s
2026-03-07T10:30:11.653 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-07T10:30:11.708 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdc
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:  File: /dev/vdc
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:29:56.481979631 +0000
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:28:29.363627663 +0000
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:28:29.363627663 +0000
2026-03-07T10:30:11.764 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:25:09.241000000 +0000
2026-03-07T10:30:11.765 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: Reconfiguring mon.b (monmap changed)...
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: Reconfiguring daemon mon.b on vm09
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:11.772 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:30:11 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:30:11.447+0000 7fbc8b998640 -1 mgr.server handle_report got status from non-daemon mon.b
2026-03-07T10:30:11.793 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in
2026-03-07T10:30:11.793 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out
2026-03-07T10:30:11.793 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000196107 s, 2.6 MB/s
2026-03-07T10:30:11.794 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: Reconfiguring mon.b (monmap changed)...
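[editor's note] The `auth get-or-create` audit entries above show the orchestrator minting mgr.b's key with the standard mgr capability profile. Done by hand inside a cephadm shell it would look roughly like this (a sketch using the same caps as the audit log; not part of the test itself):

    # Mint (or fetch, if it already exists) the mgr.b key with the caps logged above:
    ceph auth get-or-create mgr.b mon 'profile mgr' osd 'allow *' mds 'allow *'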
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: Reconfiguring daemon mon.b on vm09
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-07T10:30:11.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:11 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:11.849 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdd
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:  File: /dev/vdd
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:29:56.486979638 +0000
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:28:29.347627649 +0000
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:28:29.347627649 +0000
2026-03-07T10:30:11.905 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:25:09.244000000 +0000
2026-03-07T10:30:11.905 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-07T10:30:11.967 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in
2026-03-07T10:30:11.967 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out
2026-03-07T10:30:11.967 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000151474 s, 3.4 MB/s
2026-03-07T10:30:11.968 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-07T10:30:12.024 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vde
2026-03-07T10:30:12.080 INFO:teuthology.orchestra.run.vm08.stdout:  File: /dev/vde
2026-03-07T10:30:12.080 INFO:teuthology.orchestra.run.vm08.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:12.080 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-07T10:30:12.081 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:12.081 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:12.081 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:29:56.493979646 +0000
2026-03-07T10:30:12.081 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:28:29.335627638 +0000
2026-03-07T10:30:12.081 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:28:29.335627638 +0000
2026-03-07T10:30:12.081 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:25:09.321000000 +0000
2026-03-07T10:30:12.081 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-07T10:30:12.128 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 podman[52201]: 2026-03-07 10:30:12.077602225 +0000 UTC m=+0.017838437 container create f985e2249ea378a54d87b155c0c0de1e0c7adf5a43ad6ab2c10f33868909ddb9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:30:12.143 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in
2026-03-07T10:30:12.143 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out
2026-03-07T10:30:12.143 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000163236 s, 3.1 MB/s
2026-03-07T10:30:12.144 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-07T10:30:12.203 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-07T10:30:12.203 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout
2026-03-07T10:30:12.224 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:30:12.225 DEBUG:teuthology.orchestra.run.vm09:> ls /dev/[sv]d?
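[editor's note] The block above is teuthology's scratch-device validation on vm08: for each candidate disk it stats the node, reads a single sector, and confirms nothing has it mounted; only then is the device eligible to back an OSD. Condensed into a loop (a sketch of the same three checks, device names taken from this run):

    for dev in /dev/vdb /dev/vdc /dev/vdd /dev/vde; do
        stat "$dev"                                  # device node exists and is a block special file
        sudo dd if="$dev" of=/dev/null count=1       # first sector is readable
        ! mount | grep -v devtmpfs | grep -q "$dev"  # device is not mounted anywhere
    done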
2026-03-07T10:30:12.284 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vda
2026-03-07T10:30:12.284 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb
2026-03-07T10:30:12.284 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc
2026-03-07T10:30:12.284 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd
2026-03-07T10:30:12.284 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde
2026-03-07T10:30:12.284 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-07T10:30:12.284 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-07T10:30:12.284 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdb
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:  File: /dev/vdb
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-07 10:30:06.071469332 +0000
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-07 10:28:30.097197841 +0000
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-07 10:28:30.097197841 +0000
2026-03-07T10:30:12.342 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-07 10:25:34.227000000 +0000
2026-03-07T10:30:12.343 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-07T10:30:12.406 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 podman[52201]: 2026-03-07 10:30:12.127337942 +0000 UTC m=+0.067574154 container init f985e2249ea378a54d87b155c0c0de1e0c7adf5a43ad6ab2c10f33868909ddb9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-07T10:30:12.406 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 podman[52201]: 2026-03-07 10:30:12.132832976 +0000 UTC m=+0.073069188 container start f985e2249ea378a54d87b155c0c0de1e0c7adf5a43ad6ab2c10f33868909ddb9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:30:12.406 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 bash[52201]: f985e2249ea378a54d87b155c0c0de1e0c7adf5a43ad6ab2c10f33868909ddb9
2026-03-07T10:30:12.406 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 podman[52201]: 2026-03-07 10:30:12.067970598 +0000 UTC m=+0.008206810 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:30:12.406 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 systemd[1]: Started Ceph mgr.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:30:12.406 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:12.371+0000 7f45c6bb6100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-07T10:30:12.409 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-03-07T10:30:12.409 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-03-07T10:30:12.409 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000146745 s, 3.5 MB/s
2026-03-07T10:30:12.410 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-07T10:30:12.468 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdc
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:  File: /dev/vdc
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-07 10:30:06.073469334 +0000
2026-03-07T10:30:12.531 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-07 10:28:30.100197842 +0000
2026-03-07T10:30:12.532 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-07 10:28:30.100197842 +0000
2026-03-07T10:30:12.532 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-07 10:25:34.236000000 +0000
2026-03-07T10:30:12.532 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-07T10:30:12.607 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-03-07T10:30:12.607 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-03-07T10:30:12.607 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000414375 s, 1.2 MB/s
2026-03-07T10:30:12.607 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
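[editor's note] The podman create/init/start triple above is cephadm bringing up mgr.b as a container named ceph-<fsid>-<daemon>, wrapped by the systemd unit logged right after it. Inspecting it by hand could look like this (a sketch; the container name is taken from this run's log):

    # Show the cephadm-managed mgr.b container on vm09:
    sudo podman ps --filter name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b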
2026-03-07T10:30:12.671 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:12.499+0000 7f45c6bb6100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-07T10:30:12.672 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdd
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:  File: /dev/vdd
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-07 10:30:06.076469336 +0000
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-07 10:28:30.092197840 +0000
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-07 10:28:30.092197840 +0000
2026-03-07T10:30:12.740 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-07 10:25:34.242000000 +0000
2026-03-07T10:30:12.740 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='client.14202 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm08=a;vm09=b", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: Saving service mgr spec with placement vm08=a;vm09=b;count:2
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: Deploying daemon mgr.b on vm09
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:12.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:12.773 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:12 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:12.808 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-03-07T10:30:12.808 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-03-07T10:30:12.808 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000134171 s, 3.8 MB/s
2026-03-07T10:30:12.809 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-07T10:30:12.871 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vde
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:  File: /dev/vde
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:  Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-07 10:30:06.079469339 +0000
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-07 10:28:30.101197842 +0000
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-07 10:28:30.101197842 +0000
2026-03-07T10:30:12.932 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-07 10:25:34.249000000 +0000
2026-03-07T10:30:12.932 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='client.14202 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm08=a;vm09=b", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: Saving service mgr spec with placement vm08=a;vm09=b;count:2
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: Deploying daemon mgr.b on vm09
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:12.933 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:12 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:13.003 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-03-07T10:30:13.003 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-03-07T10:30:13.003 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000105386 s, 4.9 MB/s
2026-03-07T10:30:13.003 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-07T10:30:13.068 INFO:tasks.cephadm:Deploying osd.0 on vm08 with /dev/vde...
2026-03-07T10:30:13.068 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- lvm zap /dev/vde
2026-03-07T10:30:13.223 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:13.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-mon[50738]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-07T10:30:13.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-mon[50738]: Reconfiguring mgr.a (unknown last config time)...
2026-03-07T10:30:13.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-mon[50738]: Reconfiguring daemon mgr.a on vm08
2026-03-07T10:30:13.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-mon[50738]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:13.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:13.842 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-mon[50738]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-07T10:30:13.974 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:13 vm08 ceph-mon[50288]: Metadata not up to date on all hosts. Skipping non agent specs
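[editor's note] Before creating osd.0, the test zaps /dev/vde so no LVM or partition state from a previous run survives. The invocation is visible above; run standalone it would be (same arguments as the log, wrapped for readability):

    sudo /home/ubuntu/cephtest/cephadm \
        --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 \
        ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- lvm zap /dev/vde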
2026-03-07T10:30:13.975 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:13 vm08 ceph-mon[50288]: Reconfiguring mgr.a (unknown last config time)...
2026-03-07T10:30:13.975 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:13 vm08 ceph-mon[50288]: Reconfiguring daemon mgr.a on vm08
2026-03-07T10:30:13.975 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:13 vm08 ceph-mon[50288]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:13.975 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:13 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:13.975 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:13 vm08 ceph-mon[50288]: Metadata not up to date on all hosts. Skipping non agent specs
2026-03-07T10:30:14.212 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:13 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:13.961+0000 7f45c6bb6100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-07T10:30:14.486 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:30:14.502 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch daemon add osd vm08:/dev/vde
2026-03-07T10:30:14.655 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
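[editor's note] `ceph orch daemon add osd <host>:<device>` asks the orchestrator for a single OSD on that exact device; the `osd new`, bootstrap-osd key fetches and "Deploying daemon osd.0" entries that follow are that request being worked through. Verifying afterwards might look like this (a sketch inside a cephadm shell; output formats vary by release):

    ceph orch daemon add osd vm08:/dev/vde
    ceph orch ps          # osd.0 should be listed as running on vm08
    ceph osd tree         # osd.0 should hang under host vm08 with class hdd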
2026-03-07T10:30:14.876 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:14 vm08 ceph-mon[50288]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:14.807+0000 7f45c6bb6100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:14.925+0000 7f45c6bb6100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:14 vm09 ceph-mon[50738]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:15.591 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:15.159+0000 7f45c6bb6100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='client.14210 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:15.954 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:15 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='client.14210 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:16.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:15 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:16.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:16 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/964650150' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d7e4a242-0a4d-473b-b96f-48aaa3ac7329"}]: dispatch
2026-03-07T10:30:16.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:16 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/964650150' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d7e4a242-0a4d-473b-b96f-48aaa3ac7329"}]': finished
2026-03-07T10:30:16.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:16 vm08 ceph-mon[50288]: osdmap e5: 1 total, 0 up, 1 in
2026-03-07T10:30:16.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:16 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:16.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:16 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/4203787495' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:30:16.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:16 vm08 ceph-mon[50288]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:16.888+0000 7f45c6bb6100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/964650150' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d7e4a242-0a4d-473b-b96f-48aaa3ac7329"}]: dispatch
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/964650150' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d7e4a242-0a4d-473b-b96f-48aaa3ac7329"}]': finished
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-mon[50738]: osdmap e5: 1 total, 0 up, 1 in
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4203787495' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:30:17.203 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:16 vm09 ceph-mon[50738]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:17.569 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:17 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:17.201+0000 7f45c6bb6100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:30:17.570 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:17 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:17.327+0000 7f45c6bb6100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-07T10:30:17.570 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:17 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:17.439+0000 7f45c6bb6100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-07T10:30:17.841 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:17 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:17.567+0000 7f45c6bb6100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-07T10:30:17.841 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:17 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:17.688+0000 7f45c6bb6100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-07T10:30:18.591 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:18 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:18.175+0000 7f45c6bb6100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-07T10:30:18.591 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:18 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:18.319+0000 7f45c6bb6100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-07T10:30:19.341 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:19 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:19.001+0000 7f45c6bb6100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-07T10:30:19.750 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:19 vm08 ceph-mon[50288]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:20.091 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:19 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:19.974+0000 7f45c6bb6100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-07T10:30:20.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:19 vm09 ceph-mon[50738]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:20.466 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:20 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:20.095+0000 7f45c6bb6100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-07T10:30:20.466 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:20 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:20.219+0000 7f45c6bb6100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-07T10:30:20.841 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:20 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:20.464+0000 7f45c6bb6100 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-07T10:30:20.841 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:20 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:20.579+0000 7f45c6bb6100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-07T10:30:21.184 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:20 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:20.862+0000 7f45c6bb6100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-07T10:30:21.526 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:21.182+0000 7f45c6bb6100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:21.524+0000 7f45c6bb6100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b[52211]: 2026-03-07T10:30:21.640+0000 7f45c6bb6100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: Standby manager daemon b started
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch
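[editor's note] The long run of "Module X has missing NOTIFY_TYPES member" lines is the standby mgr loading every bundled Python module; the message flags modules that do not declare which cluster notifications they consume, not a load failure. One way to confirm the modules are still available (a sketch inside a cephadm shell):

    # List always-on, enabled and disabled mgr modules:
    ceph mgr module ls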
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch
2026-03-07T10:30:21.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:21 vm09 ceph-mon[50738]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-07T10:30:21.926 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:21.926 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-07T10:30:21.926 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:21.926 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: Standby manager daemon b started
2026-03-07T10:30:21.926 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch
2026-03-07T10:30:21.926 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-07T10:30:21.927 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch
2026-03-07T10:30:21.927 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:21 vm08 ceph-mon[50288]: from='mgr.? 192.168.123.109:0/290952849' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-07T10:30:23.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:22 vm08 ceph-mon[50288]: Deploying daemon osd.0 on vm08
2026-03-07T10:30:23.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:22 vm08 ceph-mon[50288]: mgrmap e14: a(active, since 31s), standbys: b
2026-03-07T10:30:23.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:22 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch
2026-03-07T10:30:23.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:22 vm09 ceph-mon[50738]: Deploying daemon osd.0 on vm08
2026-03-07T10:30:23.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:22 vm09 ceph-mon[50738]: mgrmap e14: a(active, since 31s), standbys: b
2026-03-07T10:30:23.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:22 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch
2026-03-07T10:30:23.968 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:23 vm08 ceph-mon[50288]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:24.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:23 vm09 ceph-mon[50738]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:25.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:24 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:25.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:24 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:25.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:24 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:25.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:24 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:24 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:24 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:24 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:25.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:24 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:25.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:24 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:25.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:24 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:24 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:24 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.719 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 0 on host 'vm08'
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.848 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:25 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:25.877 DEBUG:teuthology.orchestra.run.vm08:osd.0> sudo journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@osd.0.service
2026-03-07T10:30:25.878 INFO:tasks.cephadm:Deploying osd.1 on vm09 with /dev/vde...
2026-03-07T10:30:25.878 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- lvm zap /dev/vde
2026-03-07T10:30:26.026 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.b/config
2026-03-07T10:30:26.048 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:26.049 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:25 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:27.219 INFO:teuthology.orchestra.run.vm09.stdout:
2026-03-07T10:30:27.234 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch daemon add osd vm09:/dev/vde
2026-03-07T10:30:27.295 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:27 vm09 ceph-mon[50738]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-07T10:30:27.295 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:27 vm09 ceph-mon[50738]: osdmap e6: 1 total, 0 up, 1 in
2026-03-07T10:30:27.295 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:27 vm09 ceph-mon[50738]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-07T10:30:27.295 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:27 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "who": "b", "id": "b"}]: dispatch
2026-03-07T10:30:27.295 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:27 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:27.295 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:27 vm09 ceph-mon[50738]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:27.382 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.b/config
2026-03-07T10:30:27.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:27 vm08 ceph-mon[50288]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-07T10:30:27.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:27 vm08 ceph-mon[50288]: osdmap e6: 1 total, 0 up, 1 in
2026-03-07T10:30:27.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:27 vm08 ceph-mon[50288]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-07T10:30:27.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:27 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:27.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:27 vm08 ceph-mon[50288]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: osdmap e7: 1 total, 0 up, 1 in
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='client.14226 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0'
2026-03-07T10:30:28.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:28 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:28.522 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:30:28 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0[59725]: 2026-03-07T10:30:28.239+0000 7f8a9b745640 -1 osd.0 0 waiting for initial osdmap
2026-03-07T10:30:28.522 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:30:28 vm08 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0[59725]: 2026-03-07T10:30:28.246+0000 7f8a9655b640 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: osdmap e7: 1 total, 0 up, 1 in
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='client.14226 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347]' entity='osd.0'
2026-03-07T10:30:28.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:28 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: purged_snaps scrub starts
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: purged_snaps scrub ok
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: from='client.? 192.168.123.109:0/3185823185' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0ffb7315-b0af-4ea5-b668-36cda2348782"}]: dispatch
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0ffb7315-b0af-4ea5-b668-36cda2348782"}]: dispatch
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0ffb7315-b0af-4ea5-b668-36cda2348782"}]': finished
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347] boot
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: osdmap e8: 2 total, 1 up, 2 in
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:29.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:29 vm09 ceph-mon[50738]: from='client.? 192.168.123.109:0/3146299794' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: purged_snaps scrub starts
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: purged_snaps scrub ok
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: from='client.? 192.168.123.109:0/3185823185' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0ffb7315-b0af-4ea5-b668-36cda2348782"}]: dispatch
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0ffb7315-b0af-4ea5-b668-36cda2348782"}]: dispatch
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0ffb7315-b0af-4ea5-b668-36cda2348782"}]': finished
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: osd.0 [v2:192.168.123.108:6802/3596634347,v1:192.168.123.108:6803/3596634347] boot
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: osdmap e8: 2 total, 1 up, 2 in
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:29.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:29 vm08 ceph-mon[50288]: from='client.? 192.168.123.109:0/3146299794' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:30:31.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:31 vm09 ceph-mon[50738]: osdmap e9: 2 total, 1 up, 2 in
2026-03-07T10:30:31.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:31 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:31.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:31 vm09 ceph-mon[50738]: pgmap v19: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:31.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:31 vm08 ceph-mon[50288]: osdmap e9: 2 total, 1 up, 2 in
2026-03-07T10:30:31.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:31 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:31.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:31 vm08 ceph-mon[50288]: pgmap v19: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:33.727 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:33 vm09 ceph-mon[50738]: pgmap v20: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:33.728 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:33 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:30:33.728 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:33 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:34.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:33 vm08 ceph-mon[50288]: pgmap v20: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:34.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:33 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:30:34.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:33 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: Deploying daemon osd.1 on vm09
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:30:34.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:34 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: Deploying daemon osd.1 on vm09
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:35.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:35.023 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:35.023 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.023 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.023 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:30:35.023 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:34 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:35.742 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: pgmap v21: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: pgmap v22: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: pgmap v23: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: Reconfiguring osd.1 (unknown last config time)...
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: Reconfiguring daemon osd.1 on vm09
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.743 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:35 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: pgmap v21: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: pgmap v22: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: pgmap v23: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: Reconfiguring osd.1 (unknown last config time)...
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: Reconfiguring daemon osd.1 on vm09
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:35.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:35 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: pgmap v24: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: pgmap v25: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: Detected new or changed devices on vm08
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-07T10:30:36.783 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:36 vm09 ceph-mon[50738]: Cluster is now healthy
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: pgmap v24: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: pgmap v25: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: Detected new or changed devices on vm08
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-07T10:30:37.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:36 vm08 ceph-mon[50288]: Cluster is now healthy
2026-03-07T10:30:38.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:37 vm08 ceph-mon[50288]: pgmap v26: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:38.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:37 vm08 ceph-mon[50288]: pgmap v27: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:38.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:37 vm08 ceph-mon[50288]: pgmap v28: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:38.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:37 vm08 ceph-mon[50288]: from='osd.1 [v2:192.168.123.109:6800/2442476306,v1:192.168.123.109:6801/2442476306]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:30:38.014 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:37 vm08 ceph-mon[50288]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:30:38.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:37 vm09 ceph-mon[50738]: pgmap v26: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:38.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:37 vm09 ceph-mon[50738]: pgmap v27: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:38.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:37 vm09 ceph-mon[50738]: pgmap v28: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:38.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:37 vm09 ceph-mon[50738]: from='osd.1 [v2:192.168.123.109:6800/2442476306,v1:192.168.123.109:6801/2442476306]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:30:38.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:37 vm09 ceph-mon[50738]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:30:38.265 INFO:teuthology.orchestra.run.vm09.stdout:Created osd(s) 1 on host 'vm09'
2026-03-07T10:30:38.418 DEBUG:teuthology.orchestra.run.vm09:osd.1> sudo journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@osd.1.service
2026-03-07T10:30:38.419 INFO:tasks.cephadm:Waiting for 2 OSDs to come up...
2026-03-07T10:30:38.419 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd stat -f json
2026-03-07T10:30:38.615 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: osdmap e10: 2 total, 1 up, 2 in
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='osd.1 [v2:192.168.123.109:6800/2442476306,v1:192.168.123.109:6801/2442476306]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:38.758 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:38 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: osdmap e10: 2 total, 1 up, 2 in
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='osd.1 [v2:192.168.123.109:6800/2442476306,v1:192.168.123.109:6801/2442476306]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:38.961 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:38 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:30:38.961 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:30:39.126 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":11,"num_osds":2,"num_up_osds":1,"osd_up_since":1772879428,"num_in_osds":2,"osd_in_since":1772879428,"num_remapped_pgs":0}
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: pgmap v30: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: osdmap e11: 2 total, 1 up, 2 in
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/310140701' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-07T10:30:40.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:39 vm08 ceph-mon[50288]: from='osd.1 ' entity='osd.1'
2026-03-07T10:30:40.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: pgmap v30: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:40.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-03-07T10:30:40.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: osdmap e11: 2 total, 1 up, 2 in
2026-03-07T10:30:40.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:40.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:40.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/310140701' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-07T10:30:40.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:39 vm09 ceph-mon[50738]: from='osd.1 ' entity='osd.1'
2026-03-07T10:30:40.091 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:30:39 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1[55819]: 2026-03-07T10:30:39.714+0000 7f8630d82640 -1 osd.1 0 waiting for initial osdmap
2026-03-07T10:30:40.091 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:30:39 vm09 ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1[55819]: 2026-03-07T10:30:39.719+0000 7f862c3ab640 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-07T10:30:40.126 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd stat -f json
2026-03-07T10:30:40.273 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:40.576 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:30:40.722 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":11,"num_osds":2,"num_up_osds":1,"osd_up_since":1772879428,"num_in_osds":2,"osd_in_since":1772879428,"num_remapped_pgs":0}
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: purged_snaps scrub starts
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: purged_snaps scrub ok
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/2048063406' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: osd.1 [v2:192.168.123.109:6800/2442476306,v1:192.168.123.109:6801/2442476306] boot
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: osdmap e12: 2 total, 2 up, 2 in
2026-03-07T10:30:41.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:40 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: purged_snaps scrub starts
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: purged_snaps scrub ok
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/2048063406' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: osd.1 [v2:192.168.123.109:6800/2442476306,v1:192.168.123.109:6801/2442476306] boot
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: osdmap e12: 2 total, 2 up, 2 in
2026-03-07T10:30:41.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:40 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:30:41.723 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd stat -f json
2026-03-07T10:30:41.871 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:42.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:41 vm08 ceph-mon[50288]: pgmap v32: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:42.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:41 vm09 ceph-mon[50738]: pgmap v32: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:30:42.170 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:30:42.332 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":13,"num_osds":2,"num_up_osds":2,"osd_up_since":1772879440,"num_in_osds":2,"osd_in_since":1772879428,"num_remapped_pgs":0}
2026-03-07T10:30:42.332 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd dump --format=json
2026-03-07T10:30:42.478 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:42.767 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:42 vm08 ceph-mon[50288]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:30:42.767 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:42 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/2897635611' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-07T10:30:42.773 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:30:42.773 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":13,"fsid":"630831e6-1a10-11f1-b289-9dc3f8f14d3d","created":"2026-03-07T10:29:05.890753+0000","modified":"2026-03-07T10:30:41.755748+0000","last_up_change":"2026-03-07T10:30:40.717993+0000","last_in_change":"2026-03-07T10:30:28.829184+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"d7e4a242-0a4d-473b-b96f-48aaa3ac7329","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6803","nonce":3596634347}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6805","nonce":3596634347}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6809","nonce":3596634347}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6807","nonce":3596634347}]},"public_addr":"192.168.123.108:6803/3596634347","cluster_addr":"192.168.123.108:6805/3596634347","heartbeat_back_addr":"192.168.123.108:6809/3596634347","heartbeat_front_addr":"192.168.123.108:6807/3596634347","state":["exists","up"]},{"osd":1,"uuid":"0ffb7315-b0af-4ea5-b668-36cda2348782","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6801","nonce":2442476306}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6803","nonce":2442476306}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6807","nonce":2442476306}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6805","nonce":2442476306}]},"public_addr":"192.168.123.109:6801/2442476306","cluster_addr":"192.168.123.109:6803/2442476306","heartbeat_back_addr":"192.168.123.109:6807/2442476306","heartbeat_front_addr":"192.168.123.109:6805/2442476306","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:30:26.606855+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:30:38.691249+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.108:0/169179866":"2026-03-08T10:29:50.655228+0000","192.168.123.108:6801/3320437592":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/3933809253":"2026-03-08T10:29:50.655228+0000","192.168.123.108:6800/3320437592":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/2238453837":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/3828913339":"2026-03-08T10:29:33.208356+0000","192.168.123.108:0/2026846119":"2026-03-08T10:29:33.208356+0000","192.168.123.108:0/3516285013":"2026-03-08T10:29:33.208356+0000","192.168.123.108:6801/1900286486":"2026-03-08T10:29:33.208356+0000","192.168.123.108:6800/1900286486":"2026-03-08T10:29:33.208356+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}}
2026-03-07T10:30:42.920 INFO:tasks.cephadm.ceph_manager.ceph:[]
2026-03-07T10:30:42.920 INFO:tasks.cephadm:Setting up client nodes...
2026-03-07T10:30:42.920 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
2026-03-07T10:30:42.920 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available
2026-03-07T10:30:42.920 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph mgr dump --format=json
2026-03-07T10:30:43.062 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:30:43.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:42 vm09 ceph-mon[50738]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:30:43.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:42 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/2897635611' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-07T10:30:43.377 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:30:43.540 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":14,"flags":0,"active_gid":14156,"active_name":"a","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":3793779106},{"type":"v1","addr":"192.168.123.108:6801","nonce":3793779106}]},"active_addr":"192.168.123.108:6801/3793779106","active_change":"2026-03-07T10:29:50.655479+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14206,"name":"b","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday,
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username, or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.108:8443/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":3,"active_clients":[{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.108:0","nonce":1406436533}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.108:0","nonce":762498618}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"
192.168.123.108:0","nonce":1887160217}]}]} 2026-03-07T10:30:43.542 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-07T10:30:43.542 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-07T10:30:43.542 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd dump --format=json 2026-03-07T10:30:43.686 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:43.983 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:43 vm08 ceph-mon[50288]: pgmap v35: 0 pgs: ; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:43.984 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:43 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/1440789059' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:30:43.984 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:43 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/331863001' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:30:43.984 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:43.984 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":13,"fsid":"630831e6-1a10-11f1-b289-9dc3f8f14d3d","created":"2026-03-07T10:29:05.890753+0000","modified":"2026-03-07T10:30:41.755748+0000","last_up_change":"2026-03-07T10:30:40.717993+0000","last_in_change":"2026-03-07T10:30:28.829184+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"d7e4a242-0a4d-473b-b96f-48aaa3ac7329","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6803","nonce":3596634347}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6805","nonce":3596634347}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6809","nonce":3596634347}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6807","nonce":3596634347}]},"public_addr":"192.168.123.108:6803/3596634347","cluster_addr":"192.168.123.108:6805/3596634347","heartbeat_back_addr":"192.168.123.108:6809/3596634347","heartbeat_front_addr":"192.168.123.108:6807/3596634347","state":["exists","up"]},{"osd":1,"uuid":"0ffb7315-b0af-4ea5-b668-36cda2348782","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6801","nonce":2442476306}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192
.168.123.109:6802","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6803","nonce":2442476306}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6807","nonce":2442476306}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6805","nonce":2442476306}]},"public_addr":"192.168.123.109:6801/2442476306","cluster_addr":"192.168.123.109:6803/2442476306","heartbeat_back_addr":"192.168.123.109:6807/2442476306","heartbeat_front_addr":"192.168.123.109:6805/2442476306","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:30:26.606855+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:30:38.691249+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.108:0/169179866":"2026-03-08T10:29:50.655228+0000","192.168.123.108:6801/3320437592":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/3933809253":"2026-03-08T10:29:50.655228+0000","192.168.123.108:6800/3320437592":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/2238453837":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/3828913339":"2026-03-08T10:29:33.208356+0000","192.168.123.108:0/2026846119":"2026-03-08T10:29:33.208356+0000","192.168.123.108:0/3516285013":"2026-03-08T10:29:33.208356+0000","192.168.123.108:6801/1900286486":"2026-03-08T10:29:33.208356+0000","192.168.123.108:6800/1900286486":"2026-03-08T10:29:33.208356+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:30:44.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:43 vm09 ceph-mon[50738]: pgmap v35: 0 pgs: ; 0 B data, 453 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:44.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:43 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/1440789059' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:30:44.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:43 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/331863001' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:30:44.128 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
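The "all up!" transition above is driven by polling "ceph osd dump --format=json" through a cephadm shell, as the preceding DEBUG lines show, until every OSD reports up and in. The following is a minimal sketch of that loop, not the actual teuthology code: the cephadm path, image, and fsid are taken from this run, while the helper name, function name, and 3-second poll interval are assumptions.

    import json
    import subprocess
    import time

    # Assumed helper: command prefix for running "ceph <args>" inside a
    # cephadm shell, built from the binary path, image, and fsid shown in
    # the DEBUG lines above.
    CEPHADM = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image",
               "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5",
               "shell", "--fsid", "630831e6-1a10-11f1-b289-9dc3f8f14d3d",
               "--", "ceph"]

    def wait_for_all_up(timeout=300):
        # Poll the osd dump until every OSD reports up=1 and in=1; this is
        # the condition behind the "waiting for all up" / "all up!" lines.
        deadline = time.time() + timeout
        while time.time() < deadline:
            dump = json.loads(subprocess.check_output(
                CEPHADM + ["osd", "dump", "--format=json"], text=True))
            if all(o["up"] == 1 and o["in"] == 1 for o in dump["osds"]):
                return
            time.sleep(3)  # assumed poll interval
        raise TimeoutError("not all OSDs came up within %ds" % timeout)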
2026-03-07T10:30:44.128 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd dump --format=json 2026-03-07T10:30:44.271 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:44.569 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:44.569 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":13,"fsid":"630831e6-1a10-11f1-b289-9dc3f8f14d3d","created":"2026-03-07T10:29:05.890753+0000","modified":"2026-03-07T10:30:41.755748+0000","last_up_change":"2026-03-07T10:30:40.717993+0000","last_in_change":"2026-03-07T10:30:28.829184+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"d7e4a242-0a4d-473b-b96f-48aaa3ac7329","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6803","nonce":3596634347}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6805","nonce":3596634347}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6809","nonce":3596634347}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":3596634347},{"type":"v1","addr":"192.168.123.108:6807","nonce":3596634347}]},"public_addr":"192.168.123.108:6803/3596634347","cluster_addr":"192.168.123.108:6805/3596634347","heartbeat_back_addr":"192.168.123.108:6809/3596634347","heartbeat_front_addr":"192.168.123.108:6807/3596634347","state":["exists","up"]},{"osd":1,"uuid":"0ffb7315-b0af-4ea5-b668-36cda2348782","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6801","nonce":2442476306}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6803","nonce":2442476306}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6807","nonce":2442476306}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":2442476306},{"type":"v1","addr":"192.168.123.109:6805","nonce":2442476306}]},"public_addr":"192.168.123.109:6801/2442476306","cluster_addr":"192.168.123.109:6803/2442476306","heartbeat_back_addr":"192.168.123.109:6807/2442476306","heartbeat_front_addr":"192.168.123.109:6805/2442476306","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_sn
aps_scrub":"2026-03-07T10:30:26.606855+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:30:38.691249+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.108:0/169179866":"2026-03-08T10:29:50.655228+0000","192.168.123.108:6801/3320437592":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/3933809253":"2026-03-08T10:29:50.655228+0000","192.168.123.108:6800/3320437592":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/2238453837":"2026-03-08T10:29:50.655228+0000","192.168.123.108:0/3828913339":"2026-03-08T10:29:33.208356+0000","192.168.123.108:0/2026846119":"2026-03-08T10:29:33.208356+0000","192.168.123.108:0/3516285013":"2026-03-08T10:29:33.208356+0000","192.168.123.108:6801/1900286486":"2026-03-08T10:29:33.208356+0000","192.168.123.108:6800/1900286486":"2026-03-08T10:29:33.208356+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:30:44.713 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph tell osd.0 flush_pg_stats 2026-03-07T10:30:44.714 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph tell osd.1 flush_pg_stats 2026-03-07T10:30:44.883 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:44.909 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:44.913 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:44 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/455695753' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:30:44.913 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:44 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/942638803' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:30:45.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:44 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/455695753' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:30:45.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:44 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/942638803' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:30:45.273 INFO:teuthology.orchestra.run.vm08.stdout:34359738373 2026-03-07T10:30:45.273 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd last-stat-seq osd.0 2026-03-07T10:30:45.301 INFO:teuthology.orchestra.run.vm08.stdout:51539607554 2026-03-07T10:30:45.302 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd last-stat-seq osd.1 2026-03-07T10:30:45.452 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:45.541 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:45.798 INFO:teuthology.orchestra.run.vm08.stdout:34359738372 2026-03-07T10:30:45.854 INFO:teuthology.orchestra.run.vm08.stdout:51539607553 2026-03-07T10:30:45.949 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738373 got 34359738372 for osd.0 2026-03-07T10:30:45.999 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607554 got 51539607553 for osd.1 2026-03-07T10:30:46.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:45 vm09 ceph-mon[50738]: pgmap v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:46.247 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:45 vm08 ceph-mon[50288]: pgmap v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:46.950 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd last-stat-seq osd.0 2026-03-07T10:30:47.000 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph osd last-stat-seq osd.1 2026-03-07T10:30:47.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:46 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/4074409553' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:30:47.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:46 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/2586398993' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:30:47.107 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:47.127 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:46 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/4074409553' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:30:47.127 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:46 vm08 ceph-mon[50288]: from='client.? 
192.168.123.108:0/2586398993' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:30:47.204 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:47.440 INFO:teuthology.orchestra.run.vm08.stdout:34359738373 2026-03-07T10:30:47.530 INFO:teuthology.orchestra.run.vm08.stdout:51539607555 2026-03-07T10:30:47.605 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738373 got 34359738373 for osd.0 2026-03-07T10:30:47.605 DEBUG:teuthology.parallel:result is None 2026-03-07T10:30:47.680 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607554 got 51539607555 for osd.1 2026-03-07T10:30:47.680 DEBUG:teuthology.parallel:result is None 2026-03-07T10:30:47.680 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-07T10:30:47.680 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph pg dump --format=json 2026-03-07T10:30:47.827 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:48.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:47 vm08 ceph-mon[50288]: pgmap v37: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:48.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:47 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/902493120' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:30:48.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:47 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/4170322093' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:30:48.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:47 vm09 ceph-mon[50738]: pgmap v37: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:48.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:47 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/902493120' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:30:48.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:47 vm09 ceph-mon[50738]: from='client.? 
192.168.123.108:0/4170322093' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:30:48.116 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:48.116 INFO:teuthology.orchestra.run.vm08.stderr:dumped all 2026-03-07T10:30:48.260 INFO:teuthology.orchestra.run.vm08.stdout:{"pg_ready":true,"pg_map":{"version":37,"stamp":"2026-03-07T10:30:46.341941+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":0,"num_osds":2,"num_per_pool_osds":2,"num_per_pool_omap_osds":0,"kb":41934848,"kb_used":53904,"kb_used_data":224,"kb_used_omap":3,"kb_used_meta":53628,"kb_avail":41880944,"statfs":{"total":42941284352,"available":42886086656,"internally_reserved":0,"allocated":229376,"data_stored":56464,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":3180,"internal_metadata":54915988},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats
":0,"stamp_delta":"0.000000"},"pg_stats":[],"pool_stats":[],"osd_stats":[{"osd":1,"up_from":12,"seq":51539607555,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":28232,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738373,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":28232,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[]}} 2026-03-07T10:30:48.261 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph pg dump --format=json 2026-03-07T10:30:48.404 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:48.687 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:48.687 INFO:teuthology.orchestra.run.vm08.stderr:dumped all 2026-03-07T10:30:48.833 
INFO:teuthology.orchestra.run.vm08.stdout:{"pg_ready":true,"pg_map":{"version":38,"stamp":"2026-03-07T10:30:48.342220+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":0,"num_osds":2,"num_per_pool_osds":2,"num_per_pool_omap_osds":0,"kb":41934848,"kb_used":53904,"kb_used_data":224,"kb_used_omap":3,"kb_used_meta":53628,"kb_avail":41880944,"statfs":{"total":42941284352,"available":42886086656,"internally_reserved":0,"allocated":229376,"data_stored":56464,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":3180,"internal_metadata":54915988},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"0.000000"},"pg_stats":[],"pool_stats":[],"osd_stats":[{"osd":1,"up_from":12,"seq":51539607555,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb
_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":28232,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738373,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":28232,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[]}} 2026-03-07T10:30:48.833 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-07T10:30:48.833 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-07T10:30:48.833 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-07T10:30:48.833 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph health --format=json 2026-03-07T10:30:48.978 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:49.290 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:49.291 INFO:teuthology.orchestra.run.vm08.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-07T10:30:49.453 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-07T10:30:49.453 INFO:tasks.cephadm:Setup complete, yielding 2026-03-07T10:30:49.453 INFO:teuthology.run_tasks:Running task exec... 2026-03-07T10:30:49.455 INFO:teuthology.task.exec:Executing custom commands... 2026-03-07T10:30:49.455 INFO:teuthology.task.exec:Running commands on role host.a host ubuntu@vm08.local 2026-03-07T10:30:49.455 DEBUG:teuthology.orchestra.run.vm08:> sudo TESTDIR=/home/ubuntu/cephtest bash -c 'mkdir /etc/cephadm_testing' 2026-03-07T10:30:49.478 INFO:teuthology.task.exec:Running commands on role host.b host ubuntu@vm09.local 2026-03-07T10:30:49.478 DEBUG:teuthology.orchestra.run.vm09:> sudo TESTDIR=/home/ubuntu/cephtest bash -c 'mkdir /etc/cephadm_testing' 2026-03-07T10:30:49.502 INFO:teuthology.run_tasks:Running task cephadm.apply... 
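Before the spec application below: the "need seq ... got ..." exchange above is the stat-flush handshake. "ceph tell osd.N flush_pg_stats" prints the sequence number the flushed stats will carry, and the manager then polls "ceph osd last-stat-seq osd.N" until the reported value catches up, after which "pg dump" is polled for a clean state and "ceph health" for HEALTH_OK. A hedged sketch of the handshake, again with the assumed CEPHADM prefix and poll interval:

    import subprocess
    import time

    # Assumed helper, as in the earlier sketch.
    CEPHADM = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image",
               "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5",
               "shell", "--fsid", "630831e6-1a10-11f1-b289-9dc3f8f14d3d",
               "--", "ceph"]

    def flush_pg_stats(osd_id, timeout=60):
        # "tell osd.N flush_pg_stats" returns the sequence number to wait
        # for; poll "osd last-stat-seq" until the mon has seen at least it,
        # matching the "need seq ... got ..." progress lines above.
        need = int(subprocess.check_output(
            CEPHADM + ["tell", "osd.%d" % osd_id, "flush_pg_stats"],
            text=True))
        deadline = time.time() + timeout
        while time.time() < deadline:
            got = int(subprocess.check_output(
                CEPHADM + ["osd", "last-stat-seq", "osd.%d" % osd_id],
                text=True))
            if got >= need:
                return
            time.sleep(1)  # assumed poll interval
        raise TimeoutError("osd.%d never reached stat seq %d" % (osd_id, need))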
2026-03-07T10:30:49.505 INFO:tasks.cephadm:Applying spec(s): extra_container_args: - --cpus=2 extra_entrypoint_args: - --debug_ms 10 placement: host_pattern: '*' service_type: mon --- custom_configs: - content: "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n\ \ ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\n\ sleep infinity\n" mount_path: /root/write_thing_to_file.sh extra_container_args: - -v - /etc/cephadm_testing:/root/cephadm_testing extra_entrypoint_args: - /root/write_thing_to_file.sh - -c - testing_custom_containers - -o - /root/cephadm_testing/testing.txt placement: host_pattern: '*' service_id: foo service_type: container spec: entrypoint: bash image: quay.io/fedora/fedora:latest --- custom_configs: - content: 'set -e test -f /var/cache/bar/from.txt test -f /var/cache/bar/presized.dat echo ok > /var/cache/bar/primary.txt sleep infinity ' mount_path: /root/init_check.sh extra_entrypoint_args: - /root/init_check.sh placement: host_pattern: '*' service_id: bar service_type: container spec: dirs: - data entrypoint: bash image: quay.io/fedora/fedora:latest init_containers: - entrypoint: bash entrypoint_args: - argument: -c - argument: . /etc/os-release && echo from=$ID > /var/cache/bar/from.txt image: quay.io/centos/centos:latest volume_mounts: data: /var/cache/bar:z - entrypoint: bash entrypoint_args: - argument: -c - argument: test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat volume_mounts: data: /var/cache/bar:z volume_mounts: data: /var/cache/bar:z 2026-03-07T10:30:49.505 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch apply -i - 2026-03-07T10:30:49.662 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:49.797 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:49 vm09 ceph-mon[50738]: from='client.14290 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:49.797 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:49 vm09 ceph-mon[50738]: pgmap v38: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:49.797 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:49 vm09 ceph-mon[50738]: from='client.14294 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:49.797 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:49 vm09 ceph-mon[50738]: from='client.? 192.168.123.108:0/2477188669' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-07T10:30:49.977 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mon update... 2026-03-07T10:30:49.977 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled container.foo update... 2026-03-07T10:30:49.977 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled container.bar update... 
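The cephadm.apply task serializes each spec to YAML and feeds the multi-document stream to "ceph orch apply -i -" on stdin, which yields the three "Scheduled ... update..." acknowledgements above. A rough, hypothetical sketch of that invocation using the mon spec from this run; the orch_apply function and the use of subprocess are illustrative, not the task's actual code.

    import subprocess
    import textwrap

    # The mon spec applied above, as a YAML document.
    MON_SPEC = textwrap.dedent("""\
        service_type: mon
        placement:
          host_pattern: '*'
        extra_container_args:
        - --cpus=2
        extra_entrypoint_args:
        - --debug_ms 10
        """)

    def orch_apply(spec_yaml):
        # Mirrors the DEBUG line above: the spec stream arrives on stdin
        # because of "orch apply -i -".
        cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image",
               "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5",
               "shell", "-c", "/etc/ceph/ceph.conf",
               "-k", "/etc/ceph/ceph.client.admin.keyring",
               "--fsid", "630831e6-1a10-11f1-b289-9dc3f8f14d3d",
               "--", "ceph", "orch", "apply", "-i", "-"]
        subprocess.run(cmd, input=spec_yaml, text=True, check=True)

    orch_apply(MON_SPEC)

Multiple specs, as in this run, would be joined with "---" separators into one stream, matching the "Applying spec(s)" dump above.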
2026-03-07T10:30:49.977 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:49 vm08 ceph-mon[50288]: from='client.14290 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:49.977 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:49 vm08 ceph-mon[50288]: pgmap v38: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:49.977 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:49 vm08 ceph-mon[50288]: from='client.14294 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:49.977 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:49 vm08 ceph-mon[50288]: from='client.? 192.168.123.108:0/2477188669' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-07T10:30:50.132 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-07T10:30:50.134 INFO:tasks.cephadm:Waiting for ceph service mon to start (timeout 300)... 2026-03-07T10:30:50.134 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:50.330 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:50.749 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:50.749 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:34.567445Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:49.974070Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "running": 0, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:34.567522Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:34.567491Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "running": 1, "size": 2}, "unmanaged": true}] 2026-03-07T10:30:50.920 INFO:tasks.cephadm:mon has 2/2 2026-03-07T10:30:50.920 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-07T10:30:50.922 INFO:tasks.cephadm:Waiting for ceph service container.foo to start (timeout 300)... 
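cephadm.wait_for_service, whose first two runs bracket this point, polls "ceph orch ls -f json" and compares status.running against status.size for the named service; that comparison is what prints "mon has 2/2" above and "container.foo has 0/2" below. A minimal sketch under the same assumptions as the earlier ones (the helper name and poll interval are guesses; the shell flags match the logged command):

    import json
    import subprocess
    import time

    # Assumed prefix matching the logged wait_for_service invocation.
    CEPHADM = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image",
               "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5",
               "shell", "-c", "/etc/ceph/ceph.conf",
               "-k", "/etc/ceph/ceph.client.admin.keyring",
               "--fsid", "630831e6-1a10-11f1-b289-9dc3f8f14d3d",
               "--", "ceph"]

    def wait_for_service(service_name, timeout=300):
        # Poll "orch ls -f json" until the named service reports as many
        # running daemons as its target size.
        deadline = time.time() + timeout
        while time.time() < deadline:
            services = json.loads(subprocess.check_output(
                CEPHADM + ["orch", "ls", "-f", "json"], text=True))
            for svc in services:
                if svc.get("service_name") == service_name:
                    status = svc.get("status", {})
                    if status.get("running", 0) >= status.get("size", 1):
                        return
            time.sleep(2)  # assumed poll interval
        raise TimeoutError("service %s did not fully start" % service_name)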
2026-03-07T10:30:50.922 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:51.068 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='client.24161 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: Saving service mon spec with placement * 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: Saving service container.foo spec with placement * 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: Saving service container.bar spec with placement * 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: Deploying daemon container.foo.vm08 on vm08 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: pgmap v39: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:51.179 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:50 vm08 ceph-mon[50288]: from='client.14306 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='client.24161 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: Saving service mon spec with placement * 2026-03-07T10:30:51.341 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: Saving service container.foo spec with placement * 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: Saving service container.bar spec with placement * 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: Deploying daemon container.foo.vm08 on vm08 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: pgmap v39: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:51.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:50 vm09 ceph-mon[50738]: from='client.14306 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:51.370 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:51.371 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:34.567445Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:49.974070Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "running": 0, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:34.567522Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:34.567491Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "running": 1, "size": 2}, "unmanaged": true}] 2026-03-07T10:30:51.514 INFO:tasks.cephadm:container.foo has 0/2 2026-03-07T10:30:52.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:51 vm08 ceph-mon[50288]: from='client.14310 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:52.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:51 vm09 ceph-mon[50738]: from='client.14310 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:52.515 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:52.675 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:53.009 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:53.009 
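[editor's note] Each entry in the `orch ls` JSON dump above carries a status object with created/last_refresh timestamps plus running and size counts; the `container.foo has 0/2` lines in this block are simply running/size read off that array. A small parser sketch, with a sample trimmed from the dump above:

    import json

    def service_progress(orch_ls_output, service_name):
        # Return (running, size) for one service, as reported by
        # `ceph orch ls -f json`.
        for svc in json.loads(orch_ls_output):
            if svc.get("service_name") == service_name:
                status = svc.get("status", {})
                return status.get("running", 0), status.get("size", 0)
        raise KeyError(f"no such service: {service_name}")

    # Trimmed-down sample with the same shape as the dump above.
    sample = json.dumps([
        {"service_name": "container.foo", "service_type": "container",
         "status": {"created": "2026-03-07T10:30:49.970886Z",
                    "running": 0, "size": 2}},
    ])
    print("container.foo has %d/%d" % service_progress(sample, "container.foo"))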
INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:34.567445Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:49.974070Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "running": 0, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:34.567522Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:34.567491Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "running": 1, "size": 2}, "unmanaged": true}] 2026-03-07T10:30:53.142 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 
10:30:53 vm08 ceph-mon[50288]: pgmap v40: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:53.167 INFO:tasks.cephadm:container.foo has 0/2 2026-03-07T10:30:53.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:53 vm09 ceph-mon[50738]: pgmap v40: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:54.167 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:54.275 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:54 vm08 ceph-mon[50288]: from='client.14314 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:54.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:54 vm09 ceph-mon[50738]: from='client.14314 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:54.416 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:54.746 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:54.746 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:35.881734Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:54.424916Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "running": 0, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:35.881714Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:35.881673Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:30:35.881763Z", "running": 1, "size": 2}, "unmanaged": true}] 2026-03-07T10:30:54.918 INFO:tasks.cephadm:container.foo has 0/2 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: pgmap v41: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: Deploying daemon container.foo.vm09 on vm09 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' 
entity='mgr.a' 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: pgmap v42: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: Detected new or changed devices on vm09 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: from='client.14318 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:55.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:55 vm08.local ceph-mon[50288]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: pgmap v41: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: Deploying daemon container.foo.vm09 on vm09 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: pgmap v42: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: Detected new or changed devices on vm09 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: from='client.14318 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:55 vm09 ceph-mon[50738]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-07T10:30:55.918 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:56.137 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 
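[editor's note] From here the driver simply reissues the same poll roughly once a second until running equals size, bounded by the 300 s timeout these wait tasks log when they start; the CEPHADM_FAILED_DAEMON warning just raised is transient and clears later in this block once the retried deploy lands. A re-creation of that wait loop under those assumptions (not the actual tasks.cephadm code):

    import json
    import time

    def wait_for_service(run_ceph, service_name, timeout=300, interval=1):
        # Poll `ceph orch ls -f json` until the named service reports
        # running == size, mimicking the "<service> has N/M" lines here.
        # run_ceph: callable returning ceph stdout, e.g. the
        # cephadm_shell sketch earlier in this log.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            for svc in json.loads(run_ceph("ceph", "orch", "ls", "-f", "json")):
                if svc.get("service_name") == service_name:
                    status = svc.get("status", {})
                    running = status.get("running", 0)
                    size = status.get("size", 0)
                    print(f"{service_name} has {running}/{size}")
                    if size > 0 and running >= size:
                        return
            time.sleep(interval)
        raise TimeoutError(f"{service_name} not fully running after {timeout}s")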
2026-03-07T10:30:56.453 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.453 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.454 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: pgmap v43: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:56.454 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.454 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.454 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.454 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:56 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.481 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:56.481 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:55.446085Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:54.424916Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:30:55.446212Z", "running": 0, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:55.446175Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:55.446130Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:30:55.446203Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:30:56.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: pgmap v43: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:56.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.591 
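[editor's note] The failed-daemon warning raised a moment ago is the kind of condition a harness may also want to watch directly rather than infer from service counts; `ceph health detail -f json` reports the active checks keyed by their code (schema assumed from recent releases, worth verifying on yours). A sketch of waiting for a specific check, such as CEPHADM_FAILED_DAEMON, to clear:

    import json
    import time

    def wait_health_check_clear(run_ceph, code="CEPHADM_FAILED_DAEMON",
                                timeout=300, interval=5):
        # Poll cluster health until the named check code disappears.
        # Assumes `ceph health detail -f json` returns a "checks" map
        # keyed by code; verify the schema for your release.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            health = json.loads(run_ceph("ceph", "health", "detail",
                                         "-f", "json"))
            if code not in health.get("checks", {}):
                return
            time.sleep(interval)
        raise TimeoutError(f"{code} still raised after {timeout}s")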
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:56 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:56.632 INFO:tasks.cephadm:container.foo has 0/2 2026-03-07T10:30:57.633 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:57.815 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:57.839 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:57 vm08.local ceph-mon[50288]: pgmap v44: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:57.839 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:57 vm08.local ceph-mon[50288]: from='client.14322 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:57.839 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:57 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:57.839 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:57 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:57.839 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:57 vm08.local ceph-mon[50288]: pgmap v45: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:57.839 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:57 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:57.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:57 vm09 ceph-mon[50738]: pgmap v44: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:57.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:57 vm09 ceph-mon[50738]: from='client.14322 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:57.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:57 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:57.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:57 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:57.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:57 vm09 ceph-mon[50738]: pgmap v45: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:57.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:57 vm09 ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:58.147 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:58.147 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:55.446085Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], 
"extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:54.424916Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:30:55.446212Z", "running": 1, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:55.446175Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:55.446130Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:30:55.446203Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:30:58.317 INFO:tasks.cephadm:container.foo has 1/2 2026-03-07T10:30:59.317 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:30:59.651 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local 
ceph-mon[50738]: from='client.14326 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local ceph-mon[50738]: pgmap v46: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.821 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:30:59 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.994 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: from='client.14326 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:30:59.995 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: pgmap v46: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:30:59.995 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.995 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.995 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.995 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.995 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:30:59 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:30:59.995 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:30:59.995 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:30:55.446085Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "running": 1, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:30:55.446175Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:30:55.446130Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:30:55.446203Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:00.178 INFO:tasks.cephadm:container.foo has 1/2 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: Deploying daemon container.bar.vm09 on vm09 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: pgmap v47: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' 
entity='mgr.a' 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:00.696 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:00 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: Deploying daemon container.bar.vm09 on vm09 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: pgmap v47: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:00 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.179 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:01.329 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:01.636 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:01.636 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:00.683822Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:01.637 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:01 vm08.local ceph-mon[50288]: from='client.14330 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:01.637 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:01 vm08.local ceph-mon[50288]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-07T10:31:01.637 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:01 vm08.local ceph-mon[50288]: Cluster is now healthy 2026-03-07T10:31:01.637 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:01 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.638 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:01 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.638 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 
10:31:01 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.785 INFO:tasks.cephadm:container.foo has 2/2 2026-03-07T10:31:01.785 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-07T10:31:01.787 INFO:tasks.cephadm:Waiting for ceph service container.bar to start (timeout 300)... 2026-03-07T10:31:01.787 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:01.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:01 vm09.local ceph-mon[50738]: from='client.14330 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:01.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:01 vm09.local ceph-mon[50738]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-07T10:31:01.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:01 vm09.local ceph-mon[50738]: Cluster is now healthy 2026-03-07T10:31:01.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:01 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:01 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:01 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:01.953 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:02.269 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:02.269 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:00.683822Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:02.640 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:03.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:02 vm08.local ceph-mon[50288]: pgmap v48: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:03.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:02 vm08.local ceph-mon[50288]: from='client.14334 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:03.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:02 vm09.local ceph-mon[50738]: pgmap v48: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:03.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:02 vm09.local ceph-mon[50738]: from='client.14334 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:03.640 DEBUG:teuthology.orchestra.run.vm08:> sudo 
/home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:03.800 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:03.915 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:03 vm08.local ceph-mon[50288]: from='client.14338 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:03.915 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:03 vm08.local ceph-mon[50288]: pgmap v49: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:04.117 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:04.117 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:00.683822Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:04.283 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:04.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:03 vm09.local ceph-mon[50738]: from='client.14338 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:04.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:03 vm09.local ceph-mon[50738]: pgmap v49: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:05.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:04 vm08.local ceph-mon[50288]: from='client.14342 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:05.283 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k 
/etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:05.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:04 vm09.local ceph-mon[50738]: from='client.14342 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:05.432 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:05.813 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:05.813 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:30:49.976689Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:00.683822Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], 
"placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:05.980 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:06.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:05 vm09.local ceph-mon[50738]: pgmap v50: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:06.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:05 vm09.local ceph-mon[50738]: from='client.14346 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:06.212 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:05 vm08.local ceph-mon[50288]: pgmap v50: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:06.212 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:05 vm08.local ceph-mon[50288]: from='client.14346 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:06.981 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:07.136 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:07.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: Deploying daemon container.bar.vm08 on vm08 2026-03-07T10:31:07.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:07 vm08.local ceph-mon[50288]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:07 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:07 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 
07 10:31:07 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:07 vm09.local ceph-mon[50738]: Deploying daemon container.bar.vm08 on vm08 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:07 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:07 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:07 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:07.435 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:07.436 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:31:06.023169Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:06.896622Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:07.581 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:08.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:08 vm08.local ceph-mon[50288]: from='client.14350 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:08.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:08 vm08.local ceph-mon[50288]: pgmap v51: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:08.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:08 vm09.local ceph-mon[50738]: from='client.14350 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:08.341 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:08 vm09.local ceph-mon[50738]: pgmap v51: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:08.582 DEBUG:teuthology.orchestra.run.vm08:> sudo 
/home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:08.766 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:09.081 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:09.081 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:31:06.023169Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:06.896622Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": 
{"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:09.234 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:10.234 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:10.411 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:10.757 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:10.757 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:31:06.023169Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:06.896622Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:10.891 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:10 vm08.local ceph-mon[50288]: from='client.14354 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:10.891 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:10 vm08.local ceph-mon[50288]: pgmap v52: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:10.917 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:11.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:10 vm09.local ceph-mon[50738]: from='client.14354 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:11.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:10 vm09.local ceph-mon[50738]: pgmap v52: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:11.917 DEBUG:teuthology.orchestra.run.vm08:> sudo 
/home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:11.922 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:11 vm08.local ceph-mon[50288]: from='client.24189 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:12.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:11 vm09.local ceph-mon[50738]: from='client.24189 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:31:12.616 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:12.879 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:12 vm08.local ceph-mon[50288]: pgmap v53: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:12.973 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:31:12.973 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:31:06.023169Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". 
/etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:06.896622Z", "running": 0, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:00.489192Z", "running": 2, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-07T10:31:13.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:12 vm09.local ceph-mon[50738]: pgmap v53: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:13.139 INFO:tasks.cephadm:container.bar has 0/2 2026-03-07T10:31:13.989 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:13 vm08.local systemd[1]: Stopping Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d... 
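The repeated "ceph orch ls -f json" invocations above are the cephadm task polling until container.bar reports as many running daemons as its placement size, hence the recurring "container.bar has 0/2" lines. A minimal sketch of such a wait loop, assuming the cephadm shell wrapper seen in the log; the function name, timeout, and retry interval are illustrative and this is not teuthology's actual implementation:

```python
# Minimal sketch (NOT teuthology's code) of the wait-for-service polling
# visible above: shell out to `ceph orch ls -f json` through cephadm and
# retry until the named service reports running == size.
# The cephadm path is taken from the log; --image/--fsid flags are omitted
# here for brevity, and the timeout/interval values are placeholders.
import json
import subprocess
import time

def wait_for_service(service_name: str, timeout: int = 300) -> None:
    cmd = [
        "sudo", "/home/ubuntu/cephtest/cephadm", "shell",
        "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--", "ceph", "orch", "ls", "-f", "json",
    ]
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        services = json.loads(subprocess.check_output(cmd))
        for svc in services:
            if svc.get("service_name") == service_name:
                status = svc.get("status", {})
                running, size = status.get("running", 0), status.get("size", 0)
                print(f"{service_name} has {running}/{size}")
                if size > 0 and running == size:
                    return
        time.sleep(1)  # the real task's retry cadence may differ
    raise TimeoutError(f"{service_name} never became fully running")
```

Note that the "Deploying daemon container.bar.vm08 on vm08" message at 10:31:07 shows the mgr acting while this poll is still reporting 0/2; the poll simply keeps observing until the status catches up.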
2026-03-07T10:31:13.989 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:13 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a[50265]: 2026-03-07T10:31:13.892+0000 7faabb0b0640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:31:13.989 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:13 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a[50265]: 2026-03-07T10:31:13.892+0000 7faabb0b0640 -1 mon.a@0(leader) e2 *** Got Signal Terminated *** 2026-03-07T10:31:14.140 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json 2026-03-07T10:31:14.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local podman[66711]: 2026-03-07 10:31:14.06830724 +0000 UTC m=+0.193796459 container died 2752b68a55ccc5dfb0e2b92c1ed6593b2988cfab182f4850b3fe9fdb97e49f66 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:31:14.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local podman[66711]: 2026-03-07 10:31:14.207105495 +0000 UTC m=+0.332594704 container remove 2752b68a55ccc5dfb0e2b92c1ed6593b2988cfab182f4850b3fe9fdb97e49f66 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:31:14.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local bash[66711]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a 2026-03-07T10:31:14.361 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config 2026-03-07T10:31:14.596 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service: Deactivated successfully. 2026-03-07T10:31:14.596 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local systemd[1]: Stopped Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d. 2026-03-07T10:31:14.596 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service: Consumed 1.888s CPU time. 2026-03-07T10:31:14.596 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local systemd[1]: Starting Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d... 
2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local podman[67071]: 2026-03-07 10:31:14.671791353 +0000 UTC m=+0.017672226 container create 4bfb6c082a25e8b2f376e9893f25bf7214fb3b7bc1900e67d98e4823349d6281 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local podman[67071]: 2026-03-07 10:31:14.760985943 +0000 UTC m=+0.106866817 container init 4bfb6c082a25e8b2f376e9893f25bf7214fb3b7bc1900e67d98e4823349d6281 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local podman[67071]: 2026-03-07 10:31:14.664332392 +0000 UTC m=+0.010213266 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local podman[67071]: 2026-03-07 10:31:14.765870716 +0000 UTC m=+0.111751599 container start 4bfb6c082a25e8b2f376e9893f25bf7214fb3b7bc1900e67d98e4823349d6281 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local bash[67071]: 4bfb6c082a25e8b2f376e9893f25bf7214fb3b7bc1900e67d98e4823349d6281 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local systemd[1]: Started Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d. 
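One structural note on the "orch ls" JSON repeated above: each init container's entrypoint_args is a list of {"argument": ..., "split": false} objects rather than plain strings. A sketch of how such a list plausibly flattens into argv, assuming "split" means whitespace-splitting the argument into several argv elements; that reading of the flag is an assumption, not verified cephadm code:

```python
# Illustrative only -- not cephadm's implementation. Flatten the
# entrypoint_args structure seen in the `orch ls` JSON above into argv:
# an entry contributes its "argument" verbatim when split is false, or
# (by assumption) is shlex-split into several elements when split is true.
import shlex

def flatten_entrypoint_args(entrypoint_args):
    argv = []
    for item in entrypoint_args:
        if item.get("split"):
            argv.extend(shlex.split(item["argument"]))
        else:
            argv.append(item["argument"])
    return argv

# The first init container of container.bar, as reported by `orch ls`:
args = [
    {"argument": "-c", "split": False},
    {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt",
     "split": False},
]
print(flatten_entrypoint_args(args))
# ['-c', '. /etc/os-release && echo from=$ID > /var/cache/bar/from.txt']
```

With split false, the whole ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt" string travels as a single argv element to "bash -c", which is what lets the from.txt write run as one shell command inside the init container.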
2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: set uid:gid to 167:167 (ceph:ceph) 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 6 2026-03-07T10:31:14.859 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: pidfile_write: ignore empty --pid-file 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: load: jerasure load: lrc 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: RocksDB version: 7.9.2 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Git sha 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Compile date 2026-03-06 13:52:12 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: DB SUMMARY 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: DB Session ID: UMHJ70JZ9HF820JMBFOH 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: CURRENT file: CURRENT 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: IDENTITY file: IDENTITY 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: MANIFEST file: MANIFEST-000015 size: 281 Bytes 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 2, files: 000008.sst 000013.sst 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000014.log size: 4388992 ; 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.error_if_exists: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.create_if_missing: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.paranoid_checks: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.env: 0x55e5762b0ca0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.fs: PosixFileSystem 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.info_log: 0x55e5781fe1a0 
2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_file_opening_threads: 16 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.statistics: (nil) 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.use_fsync: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_log_file_size: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.keep_log_file_num: 1000 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.recycle_log_file_num: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.allow_fallocate: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.allow_mmap_reads: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.allow_mmap_writes: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.use_direct_reads: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.create_missing_column_families: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.db_log_dir: 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.wal_dir: 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.advise_random_on_open: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: 
Options.db_write_buffer_size: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.write_buffer_manager: 0x55e578203900 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.rate_limiter: (nil) 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.wal_recovery_mode: 2 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.enable_thread_tracking: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.enable_pipelined_write: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.unordered_write: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.row_cache: None 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.wal_filter: None 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.allow_ingest_behind: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.two_write_queues: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.manual_wal_flush: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.wal_compression: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.atomic_flush: 0 2026-03-07T10:31:14.860 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-07T10:31:14.861 
INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.log_readahead_size: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.best_efforts_recovery: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.allow_data_in_errors: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.db_host_id: __hostname__ 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_background_jobs: 2 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_background_compactions: -1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_subcompactions: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_total_wal_size: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_open_files: -1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bytes_per_sync: 0 2026-03-07T10:31:14.861 
INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_readahead_size: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_background_flushes: -1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Compression algorithms supported: 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kZSTD supported: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kXpressCompression supported: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kBZip2Compression supported: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kLZ4Compression supported: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kZlibCompression supported: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kLZ4HCCompression supported: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: kSnappyCompression supported: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.merge_operator: 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_filter: None 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_filter_factory: None 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.sst_partitioner_factory: None 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.memtable_factory: SkipListFactory 
2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55e5781fe360) 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: cache_index_and_filter_blocks: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: pin_top_level_index_and_filter: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: index_type: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: data_block_index_type: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: index_shortening: 1 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: checksum: 4 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: no_block_cache: 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache: 0x55e5782231f0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache_name: BinnedLRUCache 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache_options: 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: capacity : 536870912 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: num_shard_bits : 4 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: strict_capacity_limit : 0 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: high_pri_pool_ratio: 0.000 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache_compressed: (nil) 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: persistent_cache: (nil) 2026-03-07T10:31:14.861 INFO:journalctl@ceph.mon.a.vm08.stdout: block_size: 4096 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: block_size_deviation: 10 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: block_restart_interval: 16 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: index_block_restart_interval: 1 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: metadata_block_size: 4096 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: partition_filters: 0 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: use_delta_encoding: 1 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: filter_policy: bloomfilter 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: whole_key_filtering: 1 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: verify_compression: 0 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: read_amp_bytes_per_bit: 0 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: format_version: 5 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: enable_index_compression: 1 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: block_align: 0 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: max_auto_readahead_size: 262144 2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: prepopulate_block_cache: 0 2026-03-07T10:31:14.862 
INFO:journalctl@ceph.mon.a.vm08.stdout: initial_auto_readahead_size: 8192
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout: num_file_reads_for_auto_readahead: 2
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.write_buffer_size: 33554432
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_write_buffer_number: 2
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression: NoCompression
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression: Disabled
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.prefix_extractor: nullptr
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.num_levels: 7
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.window_bits: -14
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.level: 32767
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.strategy: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.enabled: false
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.target_file_size_base: 67108864
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.target_file_size_multiplier: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.arena_block_size: 1048576
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-07T10:31:14.862 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.disable_auto_compactions: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.inplace_update_support: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.memtable_huge_page_size: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.bloom_locality: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.max_successive_merges: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.optimize_filters_for_hits: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.paranoid_file_checks: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.force_consistency_checks: 1
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.report_bg_io_stats: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.ttl: 2592000
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.periodic_compaction_seconds: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.enable_blob_files: false
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.min_blob_size: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.blob_file_size: 268435456
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.blob_compression_type: NoCompression
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.enable_blob_garbage_collection: false
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.blob_file_starting_level: 0
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
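[editor's note] The dump above is plain "rocksdb: Options.<name>: <value>" pairs repeated on every daemon start, which makes restarts easy to diff mechanically. A minimal sketch for pulling those pairs out of a captured log; the helper and regex are mine, not part of teuthology:

import re

# Sketch only: extract "rocksdb: Options.<name>: <value>" pairs from a
# teuthology log so two startup dumps (e.g. mon.a here vs. mon.b below)
# can be compared as dicts.
OPT_RE = re.compile(r"rocksdb: Options\.([\w.\[\]]+)\s*:\s+(.*?)\s*$")

def rocksdb_options(lines):
    opts = {}
    for line in lines:
        m = OPT_RE.search(line)
        if m:
            opts[m.group(1)] = m.group(2)
    return opts

# rocksdb_options(open("teuthology.log"))["write_buffer_size"] -> "33554432"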
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 17, last_sequence is 246, log_number is 10,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 10
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 10
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 2fa8971e-2fe6-4d86-a6e7-df3caac8cb6d
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879474839098, "job": 1, "event": "recovery_started", "wal_files": [14]}
2026-03-07T10:31:14.863 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #14 mode 2
2026-03-07T10:31:14.952 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:31:14.952 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:00.489256Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:31:13.139472Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:14.948815Z", "running": 1, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:00.489315Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:00.489235Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:14.948685Z", "running": 1, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:00.489286Z", "running": 2, "size": 2}, "unmanaged": true}]
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879474857951, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 18, "file_size": 3786047, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 251, "largest_seqno": 3193, "table_properties": {"data_size": 3774331, "index_size": 7277, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 3397, "raw_key_size": 30641, "raw_average_key_size": 22, "raw_value_size": 3747622, "raw_average_value_size": 2778, "num_data_blocks": 347, "num_entries": 1349, "num_filter_entries": 1349, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772879474, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "2fa8971e-2fe6-4d86-a6e7-df3caac8cb6d", "db_session_id": "UMHJ70JZ9HF820JMBFOH", "orig_file_number": 18, "seqno_to_time_mapping": "N/A"}}
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879474859805, "job": 1, "event": "recovery_finished"}
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/version_set.cc:5047] Creating manifest 20
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000014.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55e578224e00
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: DB pointer 0x55e57833c000
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: ** DB Stats **
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: ** Compaction Stats [default] **
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: L0 3/0 3.69 MB 0.8 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 253.3 0.01 0.00 1 0.014 0 0 0.0 0.0
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Sum 3/0 3.69 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 253.3 0.01 0.00 1 0.014 0 0 0.0 0.0
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 253.3 0.01 0.00 1 0.014 0 0 0.0 0.0
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout:
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: ** Compaction Stats [default] **
2026-03-07T10:31:15.113 INFO:journalctl@ceph.mon.a.vm08.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 253.3 0.01 0.00 1 0.014 0 0 0.0 0.0
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Flush(GB): cumulative 0.004, interval 0.004
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Cumulative compaction: 0.00 GB write, 108.29 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Interval compaction: 0.00 GB write, 108.29 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Block cache BinnedLRUCache@0x55e5782231f0#6 capacity: 512.00 MB usage: 13.30 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1e-05 secs_since: 0
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: Block cache entry stats(count,size,portion): FilterBlock(3,4.80 KB,0.000914931%) IndexBlock(3,8.50 KB,0.00162125%) Misc(2,0.95 KB,0.000181794%)
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: starting mon.a rank 0 at public addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] at bind addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???) e2 preinit fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).mds e1 new map
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).mds e1 print_map
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: e1
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: btime 2026-03-07T10:29:05:887604+0000
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: legacy client fscid: -1
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout: No filesystems configured
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).osd e13 crush map has features 3314932999778484224, adjusting msgr requires
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires
2026-03-07T10:31:15.114 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:14 vm08.local ceph-mon[67123]: mon.a@-1(???).paxosservice(auth 1..9) refresh upgraded, format 0 -> 3
2026-03-07T10:31:15.117 INFO:tasks.cephadm:container.bar has 1/2
2026-03-07T10:31:15.813 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: mon.a calling monitor election
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: mon.a is new leader, mons a,b in quorum (ranks 0,1)
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: monmap epoch 2
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: last_changed 2026-03-07T10:30:04.449183+0000
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: created 2026-03-07T10:29:03.741746+0000
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: min_mon_release 19 (squid)
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: election_strategy: 1
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: fsmap
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: mgrmap e14: a(active, since 84s), standbys: b
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: overall HEALTH_OK
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity='mgr.a'
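[editor's note] The election lines above ("mon.a is new leader, mons a,b in quorum") can be confirmed from the CLI as well. A small sketch; the cephadm path matches this run, the helper itself is mine:

import json
import subprocess

# Sketch: ask the cluster for its quorum state, the containerized way this
# run does everything else (`cephadm shell -- ceph ...`).
CEPHADM = "/home/ubuntu/cephtest/cephadm"

def quorum_names():
    out = subprocess.check_output(
        ["sudo", CEPHADM, "shell", "--",
         "ceph", "quorum_status", "--format", "json"])
    return json.loads(out)["quorum_names"]  # expected here: ["a", "b"]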
2026-03-07T10:31:15.814 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:15 vm08.local ceph-mon[67123]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: mon.a calling monitor election
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: mon.a is new leader, mons a,b in quorum (ranks 0,1)
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: monmap epoch 2
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: last_changed 2026-03-07T10:30:04.449183+0000
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: created 2026-03-07T10:29:03.741746+0000
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: min_mon_release 19 (squid)
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: election_strategy: 1
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: fsmap
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: mgrmap e14: a(active, since 84s), standbys: b
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: overall HEALTH_OK
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity=''
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:15.828 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:15 vm09.local ceph-mon[50738]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:16.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local systemd[1]: Stopping Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:31:16.117 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d -- ceph orch ls -f json
2026-03-07T10:31:16.280 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:31:16.488 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b[50701]: 2026-03-07T10:31:16.119+0000 7f729fc5f640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:31:16.488 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b[50701]: 2026-03-07T10:31:16.119+0000 7f729fc5f640 -1 mon.b@1(peon) e2 *** Got Signal Terminated ***
2026-03-07T10:31:16.488 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local podman[59213]: 2026-03-07 10:31:16.365537987 +0000 UTC m=+0.258668287 container died d42739b1670cd377ef4324fb0fbfa9a9607737dc989bd88f7593ed4c6656c824 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-07T10:31:16.591 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:31:16.591 INFO:teuthology.orchestra.run.vm08.stdout:[{"events": ["2026-03-07T10:30:00.838143Z service:agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "agent", "service_type": "agent", "status": {"created": "2026-03-07T10:29:50.683210Z", "last_refresh": "2026-03-07T10:31:15.653903Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "set -e\ntest -f /var/cache/bar/from.txt\ntest -f /var/cache/bar/presized.dat\necho ok > /var/cache/bar/primary.txt\nsleep infinity\n", "mount_path": "/root/init_check.sh"}], "events": ["2026-03-07T10:31:13.139472Z service:container.bar [INFO] \"service was created\""], "extra_entrypoint_args": ["/root/init_check.sh"], "placement": {"host_pattern": "*"}, "service_id": "bar", "service_name": "container.bar", "service_type": "container", "spec": {"dirs": ["data"], "entrypoint": "bash", "image": "quay.io/fedora/fedora:latest", "init_containers": [{"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": ". /etc/os-release && echo from=$ID > /var/cache/bar/from.txt", "split": false}], "image": "quay.io/centos/centos:latest", "volume_mounts": {"data": "/var/cache/bar:z"}}, {"entrypoint": "bash", "entrypoint_args": [{"argument": "-c", "split": false}, {"argument": "test -f /var/cache/bar/from.txt && truncate -s 102400 /var/cache/bar/presized.dat", "split": false}], "volume_mounts": {"data": "/var/cache/bar:z"}}], "volume_mounts": {"data": "/var/cache/bar:z"}}, "status": {"created": "2026-03-07T10:30:49.974173Z", "last_refresh": "2026-03-07T10:31:15.654086Z", "running": 2, "size": 2}}, {"custom_configs": [{"content": "while getopts \"o:c:\" opt; do\n case ${opt} in\n o )\n OUT_FILE=${OPTARG}\n ;;\n c )\n CONTENT=${OPTARG}\n esac\ndone\necho $CONTENT > $OUT_FILE\nsleep infinity\n", "mount_path": "/root/write_thing_to_file.sh"}], "events": ["2026-03-07T10:30:58.901242Z service:container.foo [INFO] \"service was created\""], "extra_container_args": ["-v", "/etc/cephadm_testing:/root/cephadm_testing"], "extra_entrypoint_args": ["/root/write_thing_to_file.sh", "-c", "testing_custom_containers", "-o", "/root/cephadm_testing/testing.txt"], "placement": {"host_pattern": "*"}, "service_id": "foo", "service_name": "container.foo", "service_type": "container", "spec": {"entrypoint": "bash", "image": "quay.io/fedora/fedora:latest"}, "status": {"created": "2026-03-07T10:30:49.970886Z", "last_refresh": "2026-03-07T10:31:15.654038Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:12.172661Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm08=a", "vm09=b"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-07T10:30:11.304408Z", "last_refresh": "2026-03-07T10:31:15.653980Z", "running": 2, "size": 2}}, {"events": ["2026-03-07T10:30:49.987242Z service:mon [INFO] \"service was created\""], "extra_container_args": ["--cpus=2"], "extra_entrypoint_args": ["--debug_ms 10"], "placement": {"host_pattern": "*"}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-07T10:30:49.966884Z", "last_refresh": "2026-03-07T10:31:15.653949Z", "running": 1, "size": 2}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", "container_image_name": "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0", "last_refresh": "2026-03-07T10:31:15.654008Z", "running": 2, "size": 2}, "unmanaged": true}]
2026-03-07T10:31:16.764 INFO:tasks.cephadm:container.bar has 2/2
2026-03-07T10:31:16.764 INFO:teuthology.run_tasks:Running task exec...
2026-03-07T10:31:16.766 INFO:teuthology.task.exec:Executing custom commands...
2026-03-07T10:31:16.766 INFO:teuthology.task.exec:Running commands on role host.a host ubuntu@vm08.local
2026-03-07T10:31:16.766 DEBUG:teuthology.orchestra.run.vm08:> sudo TESTDIR=/home/ubuntu/cephtest bash -c 'set -ex
2026-03-07T10:31:16.766 DEBUG:teuthology.orchestra.run.vm08:> FSID=$(/home/ubuntu/cephtest/cephadm shell -- ceph fsid)
2026-03-07T10:31:16.766 DEBUG:teuthology.orchestra.run.vm08:> sleep 60
2026-03-07T10:31:16.766 DEBUG:teuthology.orchestra.run.vm08:> # check extra container and entrypoint args written to mon unit run file
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> grep "\-\-cpus=2" /var/lib/ceph/$FSID/mon.*/unit.run
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> grep "\-\-debug_ms 10" /var/lib/ceph/$FSID/mon.*/unit.run
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # check that custom container properly wrote content to file.
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # This requires the custom config, extra container args, and
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # entrypoint args to all be working in order for this to have
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # been written. The container entrypoint was set up with custom_configs,
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # the content and where to write to with the entrypoint args, and the mounting
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # of the /etc/cephadm_testing dir with extra container args
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> grep "testing_custom_containers" /etc/cephadm_testing/testing.txt
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # Verify that container bar'"'"'s init containers and primary container
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> # ran successfully
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> dir=$(find /var/lib/ceph/$FSID -maxdepth 1 -type d -name '"'"'container.bar.*'"'"')
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> test -n "$dir"
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> grep ok ${dir}/data/primary.txt
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> grep from=centos ${dir}/data/from.txt
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> test -s ${dir}/data/presized.dat
2026-03-07T10:31:16.767 DEBUG:teuthology.orchestra.run.vm08:> '
2026-03-07T10:31:16.788 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local podman[59213]: 2026-03-07 10:31:16.494010837 +0000 UTC m=+0.387141137 container remove d42739b1670cd377ef4324fb0fbfa9a9607737dc989bd88f7593ed4c6656c824 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:31:16.789 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local bash[59213]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b
2026-03-07T10:31:16.789 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.b.service: Deactivated successfully.
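[editor's note] The inline bash script in the DEBUG lines above is the whole verification: extra args must land in the mon unit.run, container.foo must have written through its bind mount, and container.bar's init containers plus primary container must each have left their marker file. A Python rendering of the same checks (illustrative only; the test itself runs the bash shown above):

import glob
import pathlib

# Sketch of the checks performed by the exec task's bash script.
def verify(fsid):
    base = pathlib.Path("/var/lib/ceph") / fsid
    # extra_container_args / extra_entrypoint_args end up in the mon unit.run
    for unit in glob.glob(str(base / "mon.*" / "unit.run")):
        text = pathlib.Path(unit).read_text()
        assert "--cpus=2" in text and "--debug_ms 10" in text
    # container.foo wrote through the bind mount added via extra_container_args
    assert "testing_custom_containers" in pathlib.Path(
        "/etc/cephadm_testing/testing.txt").read_text()
    # container.bar: both init containers and the primary container ran
    (bar,) = base.glob("container.bar.*")
    data = bar / "data"
    assert "ok" in (data / "primary.txt").read_text()
    assert "from=centos" in (data / "from.txt").read_text()
    assert (data / "presized.dat").stat().st_size > 0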
2026-03-07T10:31:16.789 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local systemd[1]: Stopped Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:31:16.789 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.b.service: Consumed 1.295s CPU time.
2026-03-07T10:31:16.789 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local systemd[1]: Starting Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:31:16.792 INFO:teuthology.orchestra.run.vm08.stderr:++ /home/ubuntu/cephtest/cephadm shell -- ceph fsid
2026-03-07T10:31:16.929 INFO:teuthology.orchestra.run.vm08.stderr:Inferring fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:16.973 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config
2026-03-07T10:31:17.042 INFO:teuthology.orchestra.run.vm08.stderr:Using ceph image with id '8bccc98d839a' and tag 'cobaltcore-storage-v19.2.3-fasttrack-5' created on 2026-03-06 14:41:18 +0000 UTC
2026-03-07T10:31:17.042 INFO:teuthology.orchestra.run.vm08.stderr:harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local podman[59328]: 2026-03-07 10:31:16.787830957 +0000 UTC m=+0.014280054 container create 5e639306673f12491fa0db3ca10ce2ec372c17f8937b8388ddcb01f3e6ab700f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local podman[59328]: 2026-03-07 10:31:16.822636472 +0000 UTC m=+0.049085569 container init 5e639306673f12491fa0db3ca10ce2ec372c17f8937b8388ddcb01f3e6ab700f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local podman[59328]: 2026-03-07 10:31:16.826809801 +0000 UTC m=+0.053258888 container start 5e639306673f12491fa0db3ca10ce2ec372c17f8937b8388ddcb01f3e6ab700f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True)
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local bash[59328]: 5e639306673f12491fa0db3ca10ce2ec372c17f8937b8388ddcb01f3e6ab700f
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local podman[59328]: 2026-03-07 10:31:16.781982261 +0000 UTC m=+0.008431358 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local systemd[1]: Started Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: set uid:gid to 167:167 (ceph:ceph)
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 6
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: pidfile_write: ignore empty --pid-file
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: load: jerasure load: lrc
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: RocksDB version: 7.9.2
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Git sha 0
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Compile date 2026-03-06 13:52:12
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: DB SUMMARY
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: DB Session ID: 76MDJVGOWJ70YGAXKQUL
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: CURRENT file: CURRENT
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: IDENTITY file: IDENTITY
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000008.sst
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000009.log size: 5944154 ;
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.error_if_exists: 0
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.create_if_missing: 0
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.paranoid_checks: 1
2026-03-07T10:31:17.093 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.flush_verify_memtable_count: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.env: 0x557b09287ca0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.fs: PosixFileSystem
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.info_log: 0x557b09f781a0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_file_opening_threads: 16
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.statistics: (nil)
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.use_fsync: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_log_file_size: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.log_file_time_to_roll: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.keep_log_file_num: 1000
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.recycle_log_file_num: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.allow_fallocate: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.allow_mmap_reads: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.allow_mmap_writes: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.use_direct_reads: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.create_missing_column_families: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.db_log_dir:
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.wal_dir:
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.table_cache_numshardbits: 6
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.WAL_ttl_seconds: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.WAL_size_limit_MB: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.is_fd_close_on_exec: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.advise_random_on_open: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.db_write_buffer_size: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.write_buffer_manager: 0x557b09f7d900
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.use_adaptive_mutex: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.rate_limiter: (nil)
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.wal_recovery_mode: 2
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.enable_thread_tracking: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.enable_pipelined_write: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.unordered_write: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.row_cache: None
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.wal_filter: None
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.allow_ingest_behind: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.two_write_queues: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.manual_wal_flush: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.wal_compression: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.atomic_flush: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.persist_stats_to_disk: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.write_dbid_to_manifest: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.log_readahead_size: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.best_efforts_recovery: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.allow_data_in_errors: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.db_host_id: __hostname__
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.enforce_single_del_contracts: true
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_background_jobs: 2
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_background_compactions: -1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_subcompactions: 1
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-07T10:31:17.094 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.delayed_write_rate : 16777216
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_total_wal_size: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.stats_dump_period_sec: 600
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.stats_persist_period_sec: 600
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_open_files: -1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bytes_per_sync: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.wal_bytes_per_sync: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.strict_bytes_per_sync: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_readahead_size: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_background_flushes: -1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Compression algorithms supported:
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kZSTD supported: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kXpressCompression supported: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kBZip2Compression supported: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kLZ4Compression supported: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kZlibCompression supported: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kLZ4HCCompression supported: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: kSnappyCompression supported: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Fast CRC32 supported: Supported on x86
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: DMutex implementation: pthread_mutex_t
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000010
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.merge_operator:
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_filter: None
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_filter_factory: None
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.sst_partitioner_factory: None
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.memtable_factory: SkipListFactory
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.table_factory: BlockBasedTable
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x557b09f78360)
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks_with_high_priority: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_top_level_index_and_filter: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: index_type: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_index_type: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: index_shortening: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_hash_table_util_ratio: 0.750000
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: checksum: 4
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: no_block_cache: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache: 0x557b09f9d1f0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_name: BinnedLRUCache
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_options:
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: capacity : 536870912
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: num_shard_bits : 4
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: strict_capacity_limit : 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: high_pri_pool_ratio: 0.000
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_compressed: (nil)
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: persistent_cache: (nil)
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size: 4096
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size_deviation: 10
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_restart_interval: 16
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: index_block_restart_interval: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: metadata_block_size: 4096
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: partition_filters: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: use_delta_encoding: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: filter_policy: bloomfilter
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: whole_key_filtering: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: verify_compression: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: read_amp_bytes_per_bit: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: format_version: 5
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_index_compression: 1
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: block_align: 0
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: max_auto_readahead_size: 262144
2026-03-07T10:31:17.095 INFO:journalctl@ceph.mon.b.vm09.stdout: prepopulate_block_cache: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout: initial_auto_readahead_size: 8192
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout: num_file_reads_for_auto_readahead: 2
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.write_buffer_size: 33554432
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_write_buffer_number: 2
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression: NoCompression
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression: Disabled
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.prefix_extractor: nullptr
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.num_levels: 7
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.window_bits: -14
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.level: 32767
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.strategy: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.enabled: false
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.target_file_size_base: 67108864
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.target_file_size_multiplier: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.arena_block_size: 1048576
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.disable_auto_compactions: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.inplace_update_support: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-07T10:31:17.096 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.memtable_huge_page_size: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.bloom_locality: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.max_successive_merges: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.optimize_filters_for_hits: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.paranoid_file_checks: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.force_consistency_checks: 1
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.report_bg_io_stats: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.ttl: 2592000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.periodic_compaction_seconds: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.enable_blob_files: false
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.min_blob_size: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.blob_file_size: 268435456
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.blob_compression_type: NoCompression
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.enable_blob_garbage_collection: false
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.blob_file_starting_level: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 38d41774-76de-4e79-ad06-f109524b8d98
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879476846170, "job": 1, "event": "recovery_started", "wal_files": [9]}
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879476866156, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 4086477, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 6, "largest_seqno": 3173, "table_properties": {"data_size": 4073305, "index_size": 8093, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 4037, "raw_key_size": 41546, "raw_average_key_size": 25, "raw_value_size": 4039914, "raw_average_value_size": 2521, "num_data_blocks": 386, "num_entries": 1602, "num_filter_entries": 1602, "num_deletions": 4, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772879476, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "38d41774-76de-4e79-ad06-f109524b8d98", "db_session_id": "76MDJVGOWJ70YGAXKQUL", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}}
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879476866252, "job": 1, "event": "recovery_finished"}
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/version_set.cc:5047] Creating manifest 15
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x557b09f9ee00
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: DB pointer 0x557b0a0b6000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: ** DB Stats **
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] **
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: L0 2/0 3.90 MB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 307.9 0.01 0.00 1 0.013 0 0 0.0 0.0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Sum 2/0 3.90 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 307.9 0.01 0.00 1 0.013 0 0 0.0 0.0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 307.9 0.01 0.00 1 0.013 0 0 0.0 0.0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] **
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 307.9 0.01 0.00 1 0.013 0 0 0.0 0.0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Flush(GB): cumulative 0.004, interval 0.004
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative compaction: 0.00 GB write, 164.61 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval compaction: 0.00 GB write, 164.61 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache BinnedLRUCache@0x557b09f9d1f0#6 capacity: 512.00 MB usage: 12.33 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.1e-05 secs_since: 0
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,4.14 KB,0.000789762%) IndexBlock(2,8.19 KB,0.00156164%) Misc(1,0.00 KB,0%)
2026-03-07T10:31:17.097 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: starting mon.b rank 1 at public addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] at bind addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???) e2 preinit fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).mds e1 new map
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).mds e1 print_map
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: e1
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: btime 2026-03-07T10:29:05:887604+0000
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: legacy client fscid: -1
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout: No filesystems configured
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).osd e13 crush map has features 3314932999778484224, adjusting msgr requires
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires
2026-03-07T10:31:17.098 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:16 vm09.local ceph-mon[59363]: mon.b@-1(???).paxosservice(auth 1..10) refresh upgraded, format 0 -> 3
2026-03-07T10:31:17.567 INFO:teuthology.orchestra.run.vm08.stderr:+ FSID=630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:17.567 INFO:teuthology.orchestra.run.vm08.stderr:+ sleep 60
2026-03-07T10:31:17.917 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local systemd[1]: Stopping Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:31:17.917 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a[67081]: 2026-03-07T10:31:17.782+0000 7fd91ce7a640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:31:17.917 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a[67081]: 2026-03-07T10:31:17.782+0000 7fd91ce7a640 -1 mon.a@0(leader) e2 *** Got Signal Terminated ***
2026-03-07T10:31:17.917 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local podman[67915]: 2026-03-07 10:31:17.806562666 +0000 UTC m=+0.037297396 container died 4bfb6c082a25e8b2f376e9893f25bf7214fb3b7bc1900e67d98e4823349d6281 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-07T10:31:18.192 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local podman[67915]: 2026-03-07 10:31:17.921143957 +0000 UTC m=+0.151878676 container remove 4bfb6c082a25e8b2f376e9893f25bf7214fb3b7bc1900e67d98e4823349d6281 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:31:18.192 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local bash[67915]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a
2026-03-07T10:31:18.192 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service: Deactivated successfully.
2026-03-07T10:31:18.192 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:17 vm08.local systemd[1]: Stopped Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:31:18.192 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local systemd[1]: Starting Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:31:18.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local podman[68036]: 2026-03-07 10:31:18.192337659 +0000 UTC m=+0.018838577 container create 6c0ab8571588ff66eea385bb0e4041e56d96a57f19fd56b515637b5f55db8deb (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-07T10:31:18.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local podman[68036]: 2026-03-07 10:31:18.23033433 +0000 UTC m=+0.056835260 container init 6c0ab8571588ff66eea385bb0e4041e56d96a57f19fd56b515637b5f55db8deb (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True)
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local podman[68036]: 2026-03-07 10:31:18.233843037 +0000 UTC m=+0.060343956 container start 6c0ab8571588ff66eea385bb0e4041e56d96a57f19fd56b515637b5f55db8deb (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local bash[68036]: 6c0ab8571588ff66eea385bb0e4041e56d96a57f19fd56b515637b5f55db8deb
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local podman[68036]: 2026-03-07 10:31:18.182671288 +0000 UTC m=+0.009172226 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local systemd[1]: Started Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: set uid:gid to 167:167 (ceph:ceph)
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 7
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: pidfile_write: ignore empty --pid-file
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: load: jerasure load: lrc
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: RocksDB version: 7.9.2
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Git sha 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Compile date 2026-03-06 13:52:12
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: DB SUMMARY
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: DB Session ID: 3682EP6Q22NC7QRYD1Y9
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: CURRENT file: CURRENT
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: IDENTITY file: IDENTITY
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: MANIFEST file: MANIFEST-000020 size: 373 Bytes
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 3, files: 000008.sst 000013.sst 000018.sst
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000019.log size: 350995 ;
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.error_if_exists: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.create_if_missing: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.paranoid_checks: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.flush_verify_memtable_count: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.env: 0x555653487ca0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.fs: PosixFileSystem
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.info_log: 0x5556540541a0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_file_opening_threads: 16
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.statistics: (nil)
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.use_fsync: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_log_file_size: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.log_file_time_to_roll: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.keep_log_file_num: 1000
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.recycle_log_file_num: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.allow_fallocate: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.allow_mmap_reads: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.allow_mmap_writes: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.use_direct_reads: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.create_missing_column_families: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.db_log_dir:
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.wal_dir:
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.table_cache_numshardbits: 6
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.WAL_ttl_seconds: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.WAL_size_limit_MB: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.is_fd_close_on_exec: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.advise_random_on_open: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.db_write_buffer_size: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.write_buffer_manager: 0x555654059900
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.use_adaptive_mutex: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.rate_limiter: (nil)
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-07T10:31:18.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.wal_recovery_mode: 2
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.enable_thread_tracking: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.enable_pipelined_write: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.unordered_write: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.row_cache: None
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.wal_filter: None
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.allow_ingest_behind: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.two_write_queues: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.manual_wal_flush: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.wal_compression: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.atomic_flush: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.persist_stats_to_disk: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.write_dbid_to_manifest: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.log_readahead_size: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.best_efforts_recovery: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.allow_data_in_errors: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.db_host_id: __hostname__
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.enforce_single_del_contracts: true
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_background_jobs: 2
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_background_compactions: -1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_subcompactions: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.delayed_write_rate : 16777216
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_total_wal_size: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.stats_dump_period_sec: 600
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.stats_persist_period_sec: 600
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_open_files: -1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bytes_per_sync: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.wal_bytes_per_sync: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.strict_bytes_per_sync: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_readahead_size: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_background_flushes: -1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Compression algorithms supported:
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kZSTD supported: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kXpressCompression supported: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kBZip2Compression supported: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kLZ4Compression supported: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kZlibCompression supported: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kLZ4HCCompression supported: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: kSnappyCompression supported: 1
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Fast CRC32 supported: Supported on x86
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: DMutex implementation: pthread_mutex_t
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000020
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.merge_operator:
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_filter: None
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_filter_factory: None
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.sst_partitioner_factory: None
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.memtable_factory: SkipListFactory
2026-03-07T10:31:18.524 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x555654054360) 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: cache_index_and_filter_blocks: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: pin_top_level_index_and_filter: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: index_type: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: data_block_index_type: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: index_shortening: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: checksum: 4 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: no_block_cache: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache: 0x5556540791f0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache_name: BinnedLRUCache 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache_options: 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: capacity : 536870912 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: num_shard_bits : 4 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: strict_capacity_limit : 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: high_pri_pool_ratio: 0.000 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_cache_compressed: (nil) 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: persistent_cache: (nil) 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_size: 4096 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_size_deviation: 10 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_restart_interval: 16 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: index_block_restart_interval: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: metadata_block_size: 4096 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: partition_filters: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: use_delta_encoding: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: filter_policy: bloomfilter 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: whole_key_filtering: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: verify_compression: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: read_amp_bytes_per_bit: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: format_version: 5 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: enable_index_compression: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: block_align: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: max_auto_readahead_size: 262144 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: prepopulate_block_cache: 0 2026-03-07T10:31:18.525 
INFO:journalctl@ceph.mon.a.vm08.stdout: initial_auto_readahead_size: 8192 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout: num_file_reads_for_auto_readahead: 2 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.write_buffer_size: 33554432 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_write_buffer_number: 2 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression: NoCompression 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression: Disabled 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.prefix_extractor: nullptr 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.num_levels: 7 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: 
Options.compression_opts.level: 32767 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.strategy: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.enabled: false 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-07T10:31:18.525 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.target_file_size_base: 67108864 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: 
rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.arena_block_size: 1048576 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.disable_auto_compactions: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.inplace_update_support: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-07T10:31:18.526 
INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.bloom_locality: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.max_successive_merges: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.paranoid_file_checks: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.force_consistency_checks: 1 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.report_bg_io_stats: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.ttl: 2592000 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.enable_blob_files: false 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.min_blob_size: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.blob_file_size: 268435456 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.blob_file_starting_level: 0 2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000020 succeeded,manifest_file_number is 20, next_file_number is 22, last_sequence is 3193, log_number is 15,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 15
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 15
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 2fa8971e-2fe6-4d86-a6e7-df3caac8cb6d
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879478259489, "job": 1, "event": "recovery_started", "wal_files": [19]}
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #19 mode 2
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879478262129, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 23, "file_size": 296430, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 3199, "largest_seqno": 3387, "table_properties": {"data_size": 294250, "index_size": 885, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 261, "raw_key_size": 2196, "raw_average_key_size": 23, "raw_value_size": 291818, "raw_average_value_size": 3171, "num_data_blocks": 39, "num_entries": 92, "num_filter_entries": 92, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772879478, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "2fa8971e-2fe6-4d86-a6e7-df3caac8cb6d", "db_session_id": "3682EP6Q22NC7QRYD1Y9", "orig_file_number": 23, "seqno_to_time_mapping": "N/A"}}
2026-03-07T10:31:18.526 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879478262255, "job": 1, "event": "recovery_finished"}
2026-03-07T10:31:18.527 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: rocksdb: [db/version_set.cc:5047] Creating manifest 25
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: Redeploying mon.a, (entrypoint args changed) . . .
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: Deploying daemon mon.a on vm08
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: mon.a calling monitor election
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: mon.a is new leader, mons a,b in quorum (ranks 0,1)
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: monmap epoch 2
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: last_changed 2026-03-07T10:30:04.449183+0000
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: created 2026-03-07T10:29:03.741746+0000
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: min_mon_release 19 (squid)
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: election_strategy: 1
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: fsmap
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: mgrmap e14: a(active, since 88s), standbys: b
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: daemon mon.a on vm08 is in unknown state
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 ' entity=''
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:31:19.221 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:18 vm09.local ceph-mon[59363]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: Redeploying mon.a, (entrypoint args changed) . . .
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: Deploying daemon mon.a on vm08
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: mon.a calling monitor election
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: mon.a is new leader, mons a,b in quorum (ranks 0,1)
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: monmap epoch 2
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: last_changed 2026-03-07T10:30:04.449183+0000
2026-03-07T10:31:19.272 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: created 2026-03-07T10:29:03.741746+0000
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: min_mon_release 19 (squid)
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: election_strategy: 1
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: fsmap
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: mgrmap e14: a(active, since 88s), standbys: b
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: daemon mon.a on vm08 is in unknown state
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 ' entity=''
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 ' entity='mgr.a'
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-07T10:31:19.273 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:18 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:31:19.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local systemd[1]: Stopping Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:31:19.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b[59338]: 2026-03-07T10:31:19.445+0000 7f67f6e7e640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:31:19.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b[59338]: 2026-03-07T10:31:19.445+0000 7f67f6e7e640 -1 mon.b@1(peon) e2 *** Got Signal Terminated ***
2026-03-07T10:31:19.887 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local podman[59590]: 2026-03-07 10:31:19.632562128 +0000 UTC m=+0.199179311 container died 5e639306673f12491fa0db3ca10ce2ec372c17f8937b8388ddcb01f3e6ab700f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-07T10:31:19.887 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local podman[59590]: 2026-03-07 10:31:19.750292497 +0000 UTC m=+0.316909690 container remove 5e639306673f12491fa0db3ca10ce2ec372c17f8937b8388ddcb01f3e6ab700f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:31:19.887 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local bash[59590]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b
2026-03-07T10:31:19.888 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.b.service: Deactivated successfully.
2026-03-07T10:31:19.888 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local systemd[1]: Stopped Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:31:20.342 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:19 vm09.local systemd[1]: Starting Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:31:20.342 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local podman[59712]: 2026-03-07 10:31:20.024651666 +0000 UTC m=+0.013625369 container create c8ea002e8954842473b2b078c811b1193e697da3e56facec8311fd046e6d7fbe (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-07T10:31:20.342 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local podman[59712]: 2026-03-07 10:31:20.054849878 +0000 UTC m=+0.043823581 container init c8ea002e8954842473b2b078c811b1193e697da3e56facec8311fd046e6d7fbe (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local podman[59712]: 2026-03-07 10:31:20.058139586 +0000 UTC m=+0.047113289 container start c8ea002e8954842473b2b078c811b1193e697da3e56facec8311fd046e6d7fbe (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local bash[59712]: c8ea002e8954842473b2b078c811b1193e697da3e56facec8311fd046e6d7fbe
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local podman[59712]: 2026-03-07 10:31:20.019355786 +0000 UTC m=+0.008329489 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local systemd[1]: Started Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d.
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: set uid:gid to 167:167 (ceph:ceph)
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 6
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: pidfile_write: ignore empty --pid-file
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: load: jerasure load: lrc
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: RocksDB version: 7.9.2
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Git sha 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Compile date 2026-03-06 13:52:12
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: DB SUMMARY
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: DB Session ID: NXNET5W88O4MP95JT6P7
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: CURRENT file: CURRENT
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: IDENTITY file: IDENTITY
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: MANIFEST file: MANIFEST-000015 size: 282 Bytes
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 2, files: 000008.sst 000013.sst
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000014.log size: 184127 ;
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.error_if_exists: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.create_if_missing: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.paranoid_checks: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.flush_verify_memtable_count: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.env: 0x55a73b119ca0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.fs: PosixFileSystem
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.info_log: 0x55a73bb241a0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_file_opening_threads: 16
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.statistics: (nil)
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.use_fsync: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_log_file_size: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.log_file_time_to_roll: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.keep_log_file_num: 1000
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.recycle_log_file_num: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.allow_fallocate: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.allow_mmap_reads: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.allow_mmap_writes: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.use_direct_reads: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.create_missing_column_families: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.db_log_dir:
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.wal_dir:
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.table_cache_numshardbits: 6
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.WAL_ttl_seconds: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.WAL_size_limit_MB: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.is_fd_close_on_exec: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.advise_random_on_open: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.db_write_buffer_size: 0
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.write_buffer_manager: 0x55a73bb29900
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-07T10:31:20.343 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.use_adaptive_mutex: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.rate_limiter: (nil)
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.wal_recovery_mode: 2
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.enable_thread_tracking: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.enable_pipelined_write: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.unordered_write: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.row_cache: None
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.wal_filter: None
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.allow_ingest_behind: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.two_write_queues: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.manual_wal_flush: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.wal_compression: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.atomic_flush: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.persist_stats_to_disk: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.write_dbid_to_manifest: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.log_readahead_size: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.best_efforts_recovery: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.allow_data_in_errors: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.db_host_id: __hostname__
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.enforce_single_del_contracts: true
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_background_jobs: 2
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_background_compactions: -1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_subcompactions: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.delayed_write_rate : 16777216
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_total_wal_size: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.stats_dump_period_sec: 600
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.stats_persist_period_sec: 600
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_open_files: -1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bytes_per_sync: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.wal_bytes_per_sync: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.strict_bytes_per_sync: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_readahead_size: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_background_flushes: -1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Compression algorithms supported:
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kZSTD supported: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kXpressCompression supported: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kBZip2Compression supported: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kLZ4Compression supported: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kZlibCompression supported: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kLZ4HCCompression supported: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: kSnappyCompression supported: 1
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Fast CRC32 supported: Supported on x86
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: DMutex implementation: pthread_mutex_t
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000015
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.merge_operator:
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_filter: None
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_filter_factory: None
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.sst_partitioner_factory: None
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.memtable_factory: SkipListFactory
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.table_factory: BlockBasedTable
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55a73bb24360)
2026-03-07T10:31:20.344 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks_with_high_priority: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_top_level_index_and_filter: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: index_type: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_index_type: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: index_shortening: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_hash_table_util_ratio: 0.750000
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: checksum: 4
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: no_block_cache: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache: 0x55a73bb491f0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_name: BinnedLRUCache
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_options:
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: capacity : 536870912
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: num_shard_bits : 4
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: strict_capacity_limit : 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: high_pri_pool_ratio: 0.000
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_compressed: (nil)
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: persistent_cache: (nil)
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size: 4096
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size_deviation: 10
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_restart_interval: 16
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: index_block_restart_interval: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: metadata_block_size: 4096
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: partition_filters: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: use_delta_encoding: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: filter_policy: bloomfilter
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: whole_key_filtering: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: verify_compression: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: read_amp_bytes_per_bit: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: format_version: 5
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_index_compression: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: block_align: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: max_auto_readahead_size: 262144
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: prepopulate_block_cache: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: initial_auto_readahead_size: 8192
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout: num_file_reads_for_auto_readahead: 2
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.write_buffer_size: 33554432
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_write_buffer_number: 2
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression: NoCompression
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression: Disabled
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.prefix_extractor: nullptr
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.num_levels: 7
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.window_bits: -14
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.level: 32767
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.strategy: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.enabled: false
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.target_file_size_base: 67108864
2026-03-07T10:31:20.345 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.target_file_size_multiplier: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.arena_block_size: 1048576
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.disable_auto_compactions: 0
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.inplace_update_support: 0
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.inplace_update_num_locks: 10000
2026-03-07T10:31:20.346
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.bloom_locality: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.max_successive_merges: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.paranoid_file_checks: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.force_consistency_checks: 1 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.report_bg_io_stats: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.ttl: 2592000 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.enable_blob_files: false 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.min_blob_size: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.blob_file_size: 268435456 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.blob_file_starting_level: 0 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 
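[Annotation] The dump above is the monitor's effective RocksDB configuration, logged once at startup. In Ceph these table and column-family settings are driven by the mon_rocksdb_options config string; a minimal sketch of inspecting and overriding it follows. The values mirror what is visible in the dump (write_buffer_size 33554432, no compression, dynamic level bytes) and are illustrative, not a tuning recommendation; a mon restart is needed for a change to take effect since RocksDB is opened at daemon start.

    # Show the RocksDB option string the monitors run with:
    ceph config get mon mon_rocksdb_options
    # Sketch of an override (values copied from the dump above):
    ceph config set mon mon_rocksdb_options \
        'write_buffer_size=33554432,compression=kNoCompression,level_compaction_dynamic_level_bytes=true'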
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 17, last_sequence is 3173, log_number is 10,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 10 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 10 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 38d41774-76de-4e79-ad06-f109524b8d98 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879480080194, "job": 1, "event": "recovery_started", "wal_files": [14]} 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #14 mode 2 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879480082258, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 18, "file_size": 121014, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 3185, "largest_seqno": 3276, "table_properties": {"data_size": 119340, "index_size": 443, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 197, "raw_key_size": 1589, "raw_average_key_size": 26, "raw_value_size": 117795, "raw_average_value_size": 1963, "num_data_blocks": 17, "num_entries": 60, "num_filter_entries": 60, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772879480, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "38d41774-76de-4e79-ad06-f109524b8d98", "db_session_id": "NXNET5W88O4MP95JT6P7", "orig_file_number": 18, "seqno_to_time_mapping": "N/A"}} 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772879480082306, "job": 1, "event": "recovery_finished"} 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/version_set.cc:5047] Creating manifest 20 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000014.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: 
[db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55a73bb4ae00
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: DB pointer 0x55a73bc66000
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout: ** DB Stats **
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-07T10:31:20.346 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] **
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Level    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: L0       3/0    4.01 MB   0.8      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     63.2      0.00              0.00         1    0.002       0      0       0.0       0.0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Sum      3/0    4.01 MB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     63.2      0.00              0.00         1    0.002       0      0       0.0       0.0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Int      0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   1.0      0.0     63.2      0.00              0.00         1    0.002       0      0       0.0       0.0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] **
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Priority Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: User     0/0    0.00 KB   0.0      0.0     0.0      0.0       0.0      0.0       0.0   0.0      0.0     63.2      0.00              0.00         1    0.002       0      0       0.0       0.0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative compaction: 0.00 GB write, 23.12 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval compaction: 0.00 GB write, 23.12 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache BinnedLRUCache@0x55a73bb491f0#6 capacity: 512.00 MB usage: 13.08 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 5e-06 secs_since: 0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache entry stats(count,size,portion): FilterBlock(3,4.38 KB,0.000834465%) IndexBlock(3,8.70 KB,0.00165999%) Misc(1,0.00 KB,0%)
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: CompressorRegistry(0x55a73bb11418) _refresh_config ms_osd_compression_mode 0 ms_osd_compression_methods [1] ms_osd_compress_above_min_size 1024 ms_compress_secure 0
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: Event(0x55a73bb7c088 nevent=5000 time_id=1).set_owner center_id=0 owner=140397611861568
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: stack operator() starting
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: Event(0x55a73bb7c2c8 nevent=5000 time_id=1).set_owner center_id=1 owner=140397620254272
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: stack operator() starting
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: Event(0x55a73bb7c508 nevent=5000 time_id=1).set_owner center_id=2 owner=140397712574016
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: stack operator() starting
2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: starting mon.b rank
1 at public addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] at bind addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: CompressorRegistry(0x55a73bb11d18) _refresh_config ms_osd_compression_mode 0 ms_osd_compression_methods [1] ms_osd_compress_above_min_size 1024 ms_compress_secure 0 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???) e2 preinit fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).mds e1 new map 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).mds e1 print_map 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: e1 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: btime 2026-03-07T10:29:05:887604+0000 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: legacy client fscid: -1 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout: No filesystems configured 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).osd e13 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).osd e13 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).paxosservice(auth 1..11) refresh upgraded, format 0 -> 3 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).mgr e0 loading version 14 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 vm09.local ceph-mon[59746]: mon.b@-1(???).mgr e14 active server: [v2:192.168.123.108:6800/3793779106,v1:192.168.123.108:6801/3793779106](14156) 2026-03-07T10:31:20.347 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:20 
vm09.local ceph-mon[59746]: mon.b@-1(???).mgr e14 mkfs or daemon transitioned to available, loading commands
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: mon.b calling monitor election
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: mon.a calling monitor election
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: mon.a is new leader, mons a,b in quorum (ranks 0,1)
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: monmap epoch 2
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: last_changed 2026-03-07T10:30:04.449183+0000
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: created 2026-03-07T10:29:03.741746+0000
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: min_mon_release 19 (squid)
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: election_strategy: 1
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: fsmap
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: osdmap e13: 2 total, 2 up, 2 in
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: mgrmap e14: a(active, since 89s), standbys: b
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: daemon mon.a on vm08 is in unknown state
2026-03-07T10:31:21.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:31:21.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
2026-03-07T10:31:21.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:31:21.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:31:21.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a'
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:31:21.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: pgmap v61: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:21.523 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:21 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: mon.b calling monitor election 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: mon.a calling monitor election 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: monmap epoch 2 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: last_changed 2026-03-07T10:30:04.449183+0000 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: created 2026-03-07T10:29:03.741746+0000 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: min_mon_release 19 (squid) 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: election_strategy: 1 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: 0: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.a 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: fsmap 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: osdmap e13: 2 total, 2 up, 2 in 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: mgrmap e14: a(active, since 89s), standbys: b 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: daemon mon.a on vm08 is in unknown state 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local 
ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: pgmap v61: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:21.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:21 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:22.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:22 vm08.local ceph-mon[68070]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-07T10:31:22.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:22 vm08.local ceph-mon[68070]: Cluster is now healthy 2026-03-07T10:31:22.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:22 vm09.local ceph-mon[59746]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-03-07T10:31:22.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:22 vm09.local ceph-mon[59746]: Cluster is now healthy 2026-03-07T10:31:23.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:23 vm08.local ceph-mon[68070]: pgmap v62: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:23.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:23 vm09.local ceph-mon[59746]: pgmap v62: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:25.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:25 vm08.local ceph-mon[68070]: pgmap v63: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:25.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:25 vm09.local ceph-mon[59746]: pgmap v63: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:27.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:27 vm08.local ceph-mon[68070]: pgmap v64: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:27.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:27 vm09.local ceph-mon[59746]: pgmap v64: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:29.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:29 vm08.local ceph-mon[68070]: pgmap v65: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:29.590 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:29 vm09.local ceph-mon[59746]: pgmap v65: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:31.425 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:31 vm09.local ceph-mon[59746]: pgmap v66: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:31.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:31 vm08.local ceph-mon[68070]: pgmap v66: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:33.522 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:33 vm08.local ceph-mon[68070]: pgmap v67: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:33.591 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:33 vm09.local ceph-mon[59746]: pgmap v67: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:35.433 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:35 vm08.local ceph-mon[68070]: pgmap v68: 0 pgs: ; 0 B data, 53 MiB used, 
40 GiB / 40 GiB avail 2026-03-07T10:31:35.535 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:35 vm09.local ceph-mon[59746]: pgmap v68: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:36.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:36 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:36 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:36 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:36 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:36 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:36 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:36 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:36 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:36 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:36 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:36 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:36.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:36 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:37.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:37 vm08.local ceph-mon[68070]: pgmap v69: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:37.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:37 vm09.local ceph-mon[59746]: pgmap v69: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:39.772 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:39 vm08.local ceph-mon[68070]: pgmap v70: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:39.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:39 vm09.local ceph-mon[59746]: pgmap v70: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:41.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:41 vm09.local ceph-mon[59746]: pgmap v71: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:42.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:41 vm08.local ceph-mon[68070]: pgmap v71: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:43.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:43 vm09.local ceph-mon[59746]: pgmap v72: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:44.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:43 vm08.local ceph-mon[68070]: pgmap v72: 0 pgs: ; 0 B data, 53 
MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:45.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:45 vm09.local ceph-mon[59746]: pgmap v73: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:46.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:45 vm08.local ceph-mon[68070]: pgmap v73: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:47.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:47 vm09.local ceph-mon[59746]: pgmap v74: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:48.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:47 vm08.local ceph-mon[68070]: pgmap v74: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:49.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:49 vm09.local ceph-mon[59746]: pgmap v75: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:50.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:49 vm08.local ceph-mon[68070]: pgmap v75: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:51.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:51 vm09.local ceph-mon[59746]: pgmap v76: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:52.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:51 vm08.local ceph-mon[68070]: pgmap v76: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:53.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:53 vm09.local ceph-mon[59746]: pgmap v77: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:54.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:53 vm08.local ceph-mon[68070]: pgmap v77: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:55.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:55 vm09.local ceph-mon[59746]: pgmap v78: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:55.911 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:55 vm08.local ceph-mon[68070]: pgmap v78: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:56 vm08.local ceph-mon[68070]: 
from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:56 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:31:57.966 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:57 vm09.local ceph-mon[59746]: pgmap v79: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:31:58.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:57 vm08.local ceph-mon[68070]: pgmap v79: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:00.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:31:59 vm08.local ceph-mon[68070]: pgmap v80: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:00.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:31:59 vm09.local ceph-mon[59746]: pgmap v80: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:02.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:01 vm08.local ceph-mon[68070]: pgmap v81: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:02.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:01 vm09.local ceph-mon[59746]: pgmap v81: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:04.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:03 vm08.local ceph-mon[68070]: pgmap v82: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:04.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:03 vm09.local ceph-mon[59746]: pgmap v82: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:06.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:05 vm08.local ceph-mon[68070]: pgmap v83: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:06.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:05 vm09.local ceph-mon[59746]: pgmap v83: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:08.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:07 vm08.local ceph-mon[68070]: pgmap v84: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:08.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:07 vm09.local 
ceph-mon[59746]: pgmap v84: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:09.972 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:09 vm09.local ceph-mon[59746]: pgmap v85: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:10.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:09 vm08.local ceph-mon[68070]: pgmap v85: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:12.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:11 vm08.local ceph-mon[68070]: pgmap v86: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:12.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:11 vm09.local ceph-mon[59746]: pgmap v86: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:14.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:13 vm08.local ceph-mon[68070]: pgmap v87: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:14.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:13 vm09.local ceph-mon[59746]: pgmap v87: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:15.841 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:15 vm09.local ceph-mon[59746]: pgmap v88: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:15.924 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:15 vm08.local ceph-mon[68070]: pgmap v88: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:17.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:16 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:16 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:16 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:16 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:16 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.022 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:16 vm08.local ceph-mon[68070]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:16 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:16 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:16 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:16 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:16 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.091 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:16 vm09.local ceph-mon[59746]: from='mgr.14156 192.168.123.108:0/3791268146' entity='mgr.a' 2026-03-07T10:32:17.568 INFO:teuthology.orchestra.run.vm08.stderr:+ grep 
'\-\-cpus=2' /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/unit.run
2026-03-07T10:32:17.569 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/bin/ceph-mon --privileged --group-add=disk --init --name ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a --pids-limit=-1 -d --log-driver journald --conmon-pidfile /run/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service-pid --cidfile /run/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service-cid --cgroups=split --cpus=2 -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 -e NODE_NAME=vm08.local -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 -v /var/run/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d:/var/run/ceph:z -v /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d:/var/log/ceph:z -v /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a:/var/lib/ceph/mon/ceph-a:z -v /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config:/etc/ceph/ceph.conf:z -v /etc/hosts:/etc/hosts:ro harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false --debug_ms 10
2026-03-07T10:32:17.570 INFO:teuthology.orchestra.run.vm08.stderr:+ grep '\-\-debug_ms 10' /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/unit.run
2026-03-07T10:32:17.570 INFO:teuthology.orchestra.run.vm08.stdout:/usr/bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/bin/ceph-mon --privileged --group-add=disk --init --name ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a --pids-limit=-1 -d --log-driver journald --conmon-pidfile /run/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service-pid --cidfile /run/ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service-cid --cgroups=split --cpus=2 -e CONTAINER_IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 -e NODE_NAME=vm08.local -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 -v /var/run/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d:/var/run/ceph:z -v /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d:/var/log/ceph:z -v /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a:/var/lib/ceph/mon/ceph-a:z -v /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/mon.a/config:/etc/ceph/ceph.conf:z -v /etc/hosts:/etc/hosts:ro harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false --debug_ms 10
2026-03-07T10:32:17.570 INFO:teuthology.orchestra.run.vm08.stderr:+ grep testing_custom_containers /etc/cephadm_testing/testing.txt
2026-03-07T10:32:17.571 INFO:teuthology.orchestra.run.vm08.stdout:testing_custom_containers
2026-03-07T10:32:17.571 INFO:teuthology.orchestra.run.vm08.stderr:++ find /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d -maxdepth 1 -type d -name 'container.bar.*'
2026-03-07T10:32:17.572 INFO:teuthology.orchestra.run.vm08.stderr:+ dir=/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/container.bar.vm08
2026-03-07T10:32:17.572 INFO:teuthology.orchestra.run.vm08.stderr:+ test -n /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/container.bar.vm08
2026-03-07T10:32:17.572 INFO:teuthology.orchestra.run.vm08.stderr:+ grep ok /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/container.bar.vm08/data/primary.txt
2026-03-07T10:32:17.573 INFO:teuthology.orchestra.run.vm08.stdout:ok
2026-03-07T10:32:17.573 INFO:teuthology.orchestra.run.vm08.stderr:+ grep from=centos /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/container.bar.vm08/data/from.txt
2026-03-07T10:32:17.574 INFO:teuthology.orchestra.run.vm08.stdout:from=centos
2026-03-07T10:32:17.574 INFO:teuthology.orchestra.run.vm08.stderr:+ test -s /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/container.bar.vm08/data/presized.dat
2026-03-07T10:32:17.576 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-07T10:32:17.578 INFO:tasks.cephadm:Teardown begin
2026-03-07T10:32:17.578 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:32:17.639 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:32:17.664 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-03-07T10:32:17.665 DEBUG:teuthology.orchestra.run.vm08:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:32:17.693 DEBUG:teuthology.orchestra.run.vm09:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:32:17.718 INFO:tasks.cephadm:Stopping all daemons...
2026-03-07T10:32:17.718 INFO:tasks.cephadm.mon.a:Stopping mon.a...
2026-03-07T10:32:17.718 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a
2026-03-07T10:32:17.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:17 vm08.local ceph-mon[68070]: pgmap v89: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
2026-03-07T10:32:17.878 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:17 vm08.local systemd[1]: Stopping Ceph mon.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
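[Annotation] Taken together, the '+' stderr trace above (emitted just before teardown began) amounts to the following verification script. Paths and the fsid are copied from this run; this is a readability reconstruction of the traced commands, not the workunit source itself:

    set -ex
    FSID=630831e6-1a10-11f1-b289-9dc3f8f14d3d
    # extra_container_args / extra_entrypoint_args must land in the mon unit.run
    grep -- '--cpus=2' /var/lib/ceph/$FSID/mon.*/unit.run
    grep -- '--debug_ms 10' /var/lib/ceph/$FSID/mon.*/unit.run
    # container.foo's custom-config entrypoint must have written its marker file
    grep testing_custom_containers /etc/cephadm_testing/testing.txt
    # container.bar's init containers must have populated the shared data dir
    dir=$(find /var/lib/ceph/$FSID -maxdepth 1 -type d -name 'container.bar.*')
    test -n "$dir"
    grep ok "$dir/data/primary.txt"
    grep from=centos "$dir/data/from.txt"
    test -s "$dir/data/presized.dat"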
2026-03-07T10:32:17.879 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:17 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a[68046]: 2026-03-07T10:32:17.823+0000 7fa3fa5ef640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false --debug_ms 10 (PID: 1) UID: 0 2026-03-07T10:32:17.879 INFO:journalctl@ceph.mon.a.vm08.stdout:Mar 07 10:32:17 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-a[68046]: 2026-03-07T10:32:17.823+0000 7fa3fa5ef640 -1 mon.a@0(leader) e2 *** Got Signal Terminated *** 2026-03-07T10:32:18.054 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service' 2026-03-07T10:32:18.087 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:32:18.087 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-07T10:32:18.087 INFO:tasks.cephadm.mon.b:Stopping mon.b... 2026-03-07T10:32:18.087 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.b 2026-03-07T10:32:18.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:17 vm09.local ceph-mon[59746]: pgmap v89: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:32:18.371 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:18 vm09.local systemd[1]: Stopping Ceph mon.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d... 2026-03-07T10:32:18.371 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:18 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b[59722]: 2026-03-07T10:32:18.172+0000 7fb0e5493640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false --debug_ms 10 (PID: 1) UID: 0 2026-03-07T10:32:18.371 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:18 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b[59722]: 2026-03-07T10:32:18.172+0000 7fb0e5493640 -1 mon.b@1(peon) e2 *** Got Signal Terminated *** 2026-03-07T10:32:18.371 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 07 10:32:18 vm09.local podman[60760]: 2026-03-07 10:32:18.258515184 +0000 UTC m=+0.098018765 container died c8ea002e8954842473b2b078c811b1193e697da3e56facec8311fd046e6d7fbe (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mon-b, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:32:18.435 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.b.service' 2026-03-07T10:32:18.506 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:32:18.506 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-07T10:32:18.507 INFO:tasks.cephadm.mgr.a:Stopping mgr.a... 
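[Annotation] The pkill invocations above show how the harness manages its per-daemon log tails: each daemon gets a dedicated journalctl follower process, which is later torn down by matching its exact command line. A minimal sketch of the same pattern, with the unit name copied from this run:

    UNIT=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mon.a.service
    # Follow the unit's journal from 'now' on, with no backlog:
    journalctl -f -n 0 -u "$UNIT" &
    # Later, stop exactly that follower by matching its full command line:
    sudo pkill -f "journalctl -f -n 0 -u $UNIT"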
2026-03-07T10:32:18.507 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a
2026-03-07T10:32:18.778 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:32:18 vm08.local systemd[1]: Stopping Ceph mgr.a for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:32:18.779 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:32:18 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:32:18.598+0000 7fbca7126640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mgr -n mgr.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:32:18.779 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:32:18 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a[50519]: 2026-03-07T10:32:18.598+0000 7fbca7126640 -1 mgr handle_mgr_signal *** Got signal Terminated ***
2026-03-07T10:32:18.779 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:32:18 vm08.local podman[69229]: 2026-03-07 10:32:18.638403541 +0000 UTC m=+0.053432475 container died 1c202ef12c43ccf11d7d85bd095d669fbe602319f1cb0baa51ae8c79220f4356 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:32:18.779 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:32:18 vm08.local podman[69229]: 2026-03-07 10:32:18.77563947 +0000 UTC m=+0.190668404 container remove 1c202ef12c43ccf11d7d85bd095d669fbe602319f1cb0baa51ae8c79220f4356 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-07T10:32:18.779 INFO:journalctl@ceph.mgr.a.vm08.stdout:Mar 07 10:32:18 vm08.local bash[69229]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-a
2026-03-07T10:32:18.837 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.a.service'
2026-03-07T10:32:18.867 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:32:18.867 INFO:tasks.cephadm.mgr.a:Stopped mgr.a
2026-03-07T10:32:18.867 INFO:tasks.cephadm.mgr.b:Stopping mgr.b...
2026-03-07T10:32:18.867 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.b
2026-03-07T10:32:19.157 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:32:18 vm09.local systemd[1]: Stopping Ceph mgr.b for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
2026-03-07T10:32:19.157 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:32:18 vm09.local podman[60872]: 2026-03-07 10:32:18.990869044 +0000 UTC m=+0.040361980 container died f985e2249ea378a54d87b155c0c0de1e0c7adf5a43ad6ab2c10f33868909ddb9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:32:19.157 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:32:19 vm09.local podman[60872]: 2026-03-07 10:32:19.113000293 +0000 UTC m=+0.162493229 container remove f985e2249ea378a54d87b155c0c0de1e0c7adf5a43ad6ab2c10f33868909ddb9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:32:19.158 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:32:19 vm09.local bash[60872]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-mgr-b
2026-03-07T10:32:19.158 INFO:journalctl@ceph.mgr.b.vm09.stdout:Mar 07 10:32:19 vm09.local systemd[1]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.b.service: Main process exited, code=exited, status=143/n/a
2026-03-07T10:32:19.165 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@mgr.b.service'
2026-03-07T10:32:19.193 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:32:19.193 INFO:tasks.cephadm.mgr.b:Stopped mgr.b
2026-03-07T10:32:19.193 INFO:tasks.cephadm.osd.0:Stopping osd.0...
2026-03-07T10:32:19.193 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@osd.0
2026-03-07T10:32:19.522 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:19 vm08.local systemd[1]: Stopping Ceph osd.0 for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
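
The status=143 that systemd reports for mgr.b is the conventional encoding of death by signal: 128 plus the signal number, and SIGTERM is 15, so the exit confirms a clean terminate rather than an error:

    import signal

    # 128 + SIGTERM(15) == 143: the daemon exited because of the Terminated
    # signal sent by systemctl stop, not because of a crash.
    assert 128 + signal.SIGTERM == 143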
2026-03-07T10:32:19.522 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:19 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0[59725]: 2026-03-07T10:32:19.282+0000 7f8a9bf46640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:32:19.522 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:19 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0[59725]: 2026-03-07T10:32:19.282+0000 7f8a9bf46640 -1 osd.0 13 *** Got signal Terminated ***
2026-03-07T10:32:19.522 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:19 vm08.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0[59725]: 2026-03-07T10:32:19.282+0000 7f8a9bf46640 -1 osd.0 13 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-07T10:32:24.563 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69342]: 2026-03-07 10:32:24.301638673 +0000 UTC m=+5.032263496 container died a11653d81a7534d71be98c439a007d2e0e0c59e3238a582cfb0f2a16e3e79a73 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:32:24.563 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69342]: 2026-03-07 10:32:24.42289428 +0000 UTC m=+5.153519103 container remove a11653d81a7534d71be98c439a007d2e0e0c59e3238a582cfb0f2a16e3e79a73 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9)
2026-03-07T10:32:24.563 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local bash[69342]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0
2026-03-07T10:32:24.563 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69420]: 2026-03-07 10:32:24.539173713 +0000 UTC m=+0.013330184 container create 6da5a1616e0a2e07d3ced818feb6da2d21555341e46c691392baa62f1386c489 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:32:24.922 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69420]: 2026-03-07 10:32:24.574062337 +0000 UTC m=+0.048218818 container init 6da5a1616e0a2e07d3ced818feb6da2d21555341e46c691392baa62f1386c489 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9)
2026-03-07T10:32:24.922 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69420]: 2026-03-07 10:32:24.580218417 +0000 UTC m=+0.054374888 container start 6da5a1616e0a2e07d3ced818feb6da2d21555341e46c691392baa62f1386c489 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:32:24.922 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69420]: 2026-03-07 10:32:24.582842597 +0000 UTC m=+0.056999059 container attach 6da5a1616e0a2e07d3ced818feb6da2d21555341e46c691392baa62f1386c489 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:32:24.922 INFO:journalctl@ceph.osd.0.vm08.stdout:Mar 07 10:32:24 vm08.local podman[69420]: 2026-03-07 10:32:24.533743101 +0000 UTC m=+0.007899583 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:32:25.058 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@osd.0.service'
2026-03-07T10:32:25.090 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:32:25.090 INFO:tasks.cephadm.osd.0:Stopped osd.0
2026-03-07T10:32:25.090 INFO:tasks.cephadm.osd.1:Stopping osd.1...
2026-03-07T10:32:25.090 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@osd.1
2026-03-07T10:32:25.591 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:25 vm09.local systemd[1]: Stopping Ceph osd.1 for 630831e6-1a10-11f1-b289-9dc3f8f14d3d...
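
Stopping an OSD is the only teardown step with a visible post-stop phase: after the main container dies, the unit launches a short-lived companion, named <unit>-deactivate above, before the stop completes. The log does not show what runs inside it; the sketch below illustrates only the pattern, and the ceph-volume payload is an assumption, not something confirmed by this run:

    import subprocess

    def poststop_deactivate(fsid: str, osd_id: int, image: str) -> None:
        """Run the short-lived '-deactivate' companion container for an OSD."""
        name = f"ceph-{fsid}-osd-{osd_id}-deactivate"
        subprocess.run(
            ["sudo", "podman", "run", "--rm", "--name", name, image,
             # Assumed payload: releasing the OSD's storage. The actual
             # command line is not visible in the log above.
             "ceph-volume", "lvm", "deactivate", str(osd_id)],
            check=True,
        )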
2026-03-07T10:32:25.591 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:25 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1[55819]: 2026-03-07T10:32:25.181+0000 7f8631d96640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:32:25.591 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:25 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1[55819]: 2026-03-07T10:32:25.181+0000 7f8631d96640 -1 osd.1 13 *** Got signal Terminated ***
2026-03-07T10:32:25.591 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:25 vm09.local ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1[55819]: 2026-03-07T10:32:25.181+0000 7f8631d96640 -1 osd.1 13 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-07T10:32:30.401 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[60986]: 2026-03-07 10:32:30.211096874 +0000 UTC m=+5.041857762 container died 433eb69a8a320cd9106de36b7db11aee93077c2a8023a3768d453dc0c3d6b21a (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-07T10:32:30.401 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[60986]: 2026-03-07 10:32:30.32849704 +0000 UTC m=+5.159257918 container remove 433eb69a8a320cd9106de36b7db11aee93077c2a8023a3768d453dc0c3d6b21a (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-07T10:32:30.401 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local bash[60986]: ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1
2026-03-07T10:32:30.841 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[61157]: 2026-03-07 10:32:30.515348143 +0000 UTC m=+0.019252240 container create 613578a6205f35af40c06c6e8dd1345182e8a845bb6ac545b794fb3f43660998 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:32:30.841 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[61157]: 2026-03-07 10:32:30.563489922 +0000 UTC m=+0.067394029 container init 613578a6205f35af40c06c6e8dd1345182e8a845bb6ac545b794fb3f43660998 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:32:30.841 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[61157]: 2026-03-07 10:32:30.581369354 +0000 UTC m=+0.085273451 container start 613578a6205f35af40c06c6e8dd1345182e8a845bb6ac545b794fb3f43660998 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:32:30.841 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[61157]: 2026-03-07 10:32:30.582595257 +0000 UTC m=+0.086499355 container attach 613578a6205f35af40c06c6e8dd1345182e8a845bb6ac545b794fb3f43660998 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d-osd-1-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-07T10:32:30.841 INFO:journalctl@ceph.osd.1.vm09.stdout:Mar 07 10:32:30 vm09.local podman[61157]: 2026-03-07 10:32:30.504817235 +0000 UTC m=+0.008721332 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:32:31.071 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-630831e6-1a10-11f1-b289-9dc3f8f14d3d@osd.1.service'
2026-03-07T10:32:31.100 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:32:31.100 INFO:tasks.cephadm.osd.1:Stopped osd.1
2026-03-07T10:32:31.100 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d --force --keep-logs
2026-03-07T10:32:31.213 INFO:teuthology.orchestra.run.vm08.stdout:Deleting cluster with fsid: 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:32:33.249 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d --force --keep-logs
2026-03-07T10:32:33.362 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:32:35.450 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:32:35.474 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:32:35.498 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-07T10:32:35.498 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18/remote/vm08/crash
2026-03-07T10:32:35.498 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash -- .
2026-03-07T10:32:35.537 INFO:teuthology.orchestra.run.vm08.stderr:tar: /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash: Cannot open: No such file or directory
2026-03-07T10:32:35.537 INFO:teuthology.orchestra.run.vm08.stderr:tar: Error is not recoverable: exiting now
2026-03-07T10:32:35.538 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18/remote/vm09/crash
2026-03-07T10:32:35.538 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash -- .
2026-03-07T10:32:35.561 INFO:teuthology.orchestra.run.vm09.stderr:tar: /var/lib/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/crash: Cannot open: No such file or directory
2026-03-07T10:32:35.561 INFO:teuthology.orchestra.run.vm09.stderr:tar: Error is not recoverable: exiting now
2026-03-07T10:32:35.562 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-07T10:32:35.562 DEBUG:teuthology.orchestra.run.vm08:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-07T10:32:35.603 INFO:tasks.cephadm:Compressing logs...
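
The "Checking cluster log for badness" step is the job's log-only-match and log-ignorelist overrides compiled into a grep pipeline: keep [ERR]/[WRN]/[SEC] lines, require a CEPHADM_ match, drop the ignorelisted health codes, and report only the first survivor. A pure-Python equivalent of that filter, with the patterns taken verbatim from the command above:

    import re

    SEVERITY = re.compile(r"\[ERR\]|\[WRN\]|\[SEC\]")
    ONLY_MATCH = re.compile(r"CEPHADM_")
    IGNORE = [re.compile(p) for p in (
        r"\(MDS_ALL_DOWN\)",
        r"\(MDS_UP_LESS_THAN_MAX\)",
        r"CEPHADM_FAILED_DAEMON",
    )]

    def first_bad_line(lines):
        """Return the first unexplained cluster-log line ('... | head -n 1')."""
        for line in lines:
            if (SEVERITY.search(line)
                    and ONLY_MATCH.search(line)
                    and not any(p.search(line) for p in IGNORE)):
                return line
        return None  # no output means the check passes, as it does here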
2026-03-07T10:32:35.603 DEBUG:teuthology.orchestra.run.vm08:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:32:35.646 DEBUG:teuthology.orchestra.run.vm09:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:32:35.665 INFO:teuthology.orchestra.run.vm08.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-07T10:32:35.666 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-07T10:32:35.666 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mon.a.log
2026-03-07T10:32:35.667 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log
2026-03-07T10:32:35.668 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mon.a.log: 86.8% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-07T10:32:35.668 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.audit.log
2026-03-07T10:32:35.669 INFO:teuthology.orchestra.run.vm09.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-07T10:32:35.669 INFO:teuthology.orchestra.run.vm09.stderr:‘/var/log/rbd-target-api’: No such file or directory
2026-03-07T10:32:35.669 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log: 85.9% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log.gz
2026-03-07T10:32:35.669 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mgr.a.log
2026-03-07T10:32:35.670 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-volume.log
2026-03-07T10:32:35.670 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.audit.log: 90.1% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.audit.log.gz
2026-03-07T10:32:35.670 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.cephadm.log
2026-03-07T10:32:35.671 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/cephadm.log: 83.8% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-07T10:32:35.671 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mon.b.log
2026-03-07T10:32:35.671 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.audit.log
2026-03-07T10:32:35.675 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mgr.a.log: gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-volume.log
2026-03-07T10:32:35.676 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.cephadm.log: 80.0% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.cephadm.log.gz
2026-03-07T10:32:35.676 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-volume.log: /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mon.b.log: gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log
2026-03-07T10:32:35.678 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.audit.log: 90.3% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.audit.log.gz
2026-03-07T10:32:35.679 INFO:teuthology.orchestra.run.vm09.stderr: 92.7% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-volume.log.gz
2026-03-07T10:32:35.679 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.cephadm.log
2026-03-07T10:32:35.679 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log: 85.8% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.log.gz
2026-03-07T10:32:35.680 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mgr.b.log
2026-03-07T10:32:35.680 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-osd.0.log
2026-03-07T10:32:35.680 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.cephadm.log: 78.2% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph.cephadm.log.gz
2026-03-07T10:32:35.680 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-osd.1.log
2026-03-07T10:32:35.682 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mgr.b.log: 90.9% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mgr.b.log.gz
2026-03-07T10:32:35.691 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-osd.1.log: 93.7% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-osd.1.log.gz
2026-03-07T10:32:35.695 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-volume.log: /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-osd.0.log: 92.7% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-volume.log.gz
2026-03-07T10:32:35.703 INFO:teuthology.orchestra.run.vm09.stderr: 93.5% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mon.b.log.gz
2026-03-07T10:32:35.705 INFO:teuthology.orchestra.run.vm09.stderr:
2026-03-07T10:32:35.705 INFO:teuthology.orchestra.run.vm09.stderr:real 0m0.045s
2026-03-07T10:32:35.705 INFO:teuthology.orchestra.run.vm09.stderr:user 0m0.056s
2026-03-07T10:32:35.705 INFO:teuthology.orchestra.run.vm09.stderr:sys 0m0.014s
2026-03-07T10:32:35.708 INFO:teuthology.orchestra.run.vm08.stderr: 93.7% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-osd.0.log.gz
2026-03-07T10:32:35.719 INFO:teuthology.orchestra.run.vm08.stderr: 88.9% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mgr.a.log.gz
2026-03-07T10:32:35.754 INFO:teuthology.orchestra.run.vm08.stderr: 91.9% -- replaced with /var/log/ceph/630831e6-1a10-11f1-b289-9dc3f8f14d3d/ceph-mon.a.log.gz
2026-03-07T10:32:35.755 INFO:teuthology.orchestra.run.vm08.stderr:
2026-03-07T10:32:35.755 INFO:teuthology.orchestra.run.vm08.stderr:real 0m0.098s
2026-03-07T10:32:35.755 INFO:teuthology.orchestra.run.vm08.stderr:user 0m0.137s
2026-03-07T10:32:35.755 INFO:teuthology.orchestra.run.vm08.stderr:sys 0m0.019s
2026-03-07T10:32:35.756 INFO:tasks.cephadm:Archiving logs...
2026-03-07T10:32:35.756 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18/remote/vm08/log
2026-03-07T10:32:35.756 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-07T10:32:35.827 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18/remote/vm09/log
2026-03-07T10:32:35.827 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-07T10:32:35.853 INFO:tasks.cephadm:Removing cluster...
2026-03-07T10:32:35.853 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d --force
2026-03-07T10:32:35.979 INFO:teuthology.orchestra.run.vm08.stdout:Deleting cluster with fsid: 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:32:36.059 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 630831e6-1a10-11f1-b289-9dc3f8f14d3d --force
2026-03-07T10:32:36.174 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 630831e6-1a10-11f1-b289-9dc3f8f14d3d
2026-03-07T10:32:36.255 INFO:tasks.cephadm:Removing cephadm ...
2026-03-07T10:32:36.255 DEBUG:teuthology.orchestra.run.vm08:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-07T10:32:36.269 DEBUG:teuthology.orchestra.run.vm09:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-07T10:32:36.282 INFO:tasks.cephadm:Teardown complete
2026-03-07T10:32:36.282 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-07T10:32:36.284 INFO:teuthology.task.clock:Checking final clock skew...
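
Note the ordering: rm-cluster runs twice per host. The first pass (earlier, with --keep-logs) removes the daemons but leaves /var/log/ceph in place for the compress-and-archive steps; only after "Archiving logs..." does the final pass above delete the rest. A sketch of that two-phase cleanup, with the flags exactly as logged:

    import subprocess

    CEPHADM = "/home/ubuntu/cephtest/cephadm"

    def rm_cluster(fsid: str, keep_logs: bool) -> None:
        cmd = ["sudo", CEPHADM, "rm-cluster", "--fsid", fsid, "--force"]
        if keep_logs:
            cmd.append("--keep-logs")  # first pass: logs survive for archiving
        subprocess.run(cmd, check=True)

    # rm_cluster(fsid, keep_logs=True)   # before compressing/archiving logs
    # rm_cluster(fsid, keep_logs=False)  # final removal, as in the lines above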
2026-03-07T10:32:36.284 DEBUG:teuthology.orchestra.run.vm08:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:32:36.311 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:32:36.322 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:32:36.335 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:32:36.401 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:32:36.401 INFO:teuthology.orchestra.run.vm09.stdout:===============================================================================
2026-03-07T10:32:36.401 INFO:teuthology.orchestra.run.vm09.stdout:^+ v2202508239286376495.ult> 2 6 177 51 +1892us[+1882us] +/- 18ms
2026-03-07T10:32:36.401 INFO:teuthology.orchestra.run.vm09.stdout:^* node-4.infogral.is 2 6 177 50 -908us[ -919us] +/- 15ms
2026-03-07T10:32:36.401 INFO:teuthology.orchestra.run.vm09.stdout:^+ obelix.hetzner.computer4> 3 6 177 49 -871us[ -871us] +/- 20ms
2026-03-07T10:32:36.401 INFO:teuthology.orchestra.run.vm09.stdout:^- red-pelican-63749.zap.cl> 2 6 37 26 +913us[ +913us] +/- 24ms
2026-03-07T10:32:36.490 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:32:36.490 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-07T10:32:36.490 INFO:teuthology.orchestra.run.vm08.stdout:^+ obelix.hetzner.computer4> 3 6 177 50 -843us[ -812us] +/- 20ms
2026-03-07T10:32:36.490 INFO:teuthology.orchestra.run.vm08.stdout:^+ v2202508239286376495.ult> 2 6 177 48 +1851us[+1851us] +/- 18ms
2026-03-07T10:32:36.490 INFO:teuthology.orchestra.run.vm08.stdout:^? fire1.cucl2.de 2 7 40 250 -953us[ -106us] +/- 34ms
2026-03-07T10:32:36.490 INFO:teuthology.orchestra.run.vm08.stdout:^* node-4.infogral.is 2 6 177 50 -891us[ -860us] +/- 15ms
2026-03-07T10:32:36.490 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-07T10:32:36.492 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-07T10:32:36.493 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-07T10:32:36.494 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-07T10:32:36.496 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-07T10:32:36.498 INFO:teuthology.task.internal:Duration was 353.879469 seconds
2026-03-07T10:32:36.498 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-07T10:32:36.500 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-07T10:32:36.500 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-07T10:32:36.532 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-07T10:32:36.568 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:32:36.568 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:32:36.997 INFO:teuthology.task.internal.syslog:Checking logs for errors...
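
The final clock check is a fallback chain, ntpq -p || chronyc sources || true: on these CentOS 9 hosts ntpq is absent, so the chrony source tables above are what gets recorded, and the trailing || true keeps a missing NTP client from failing the run. A sketch of the same chain:

    import subprocess

    def clock_report() -> str:
        """Try ntpq, fall back to chronyc; never raise ('|| true')."""
        for cmd in (["ntpq", "-p"], ["chronyc", "sources"]):
            try:
                proc = subprocess.run(cmd, capture_output=True, text=True,
                                      env={"PATH": "/usr/bin:/usr/sbin"})
            except FileNotFoundError:  # e.g. "ntpq: command not found"
                continue
            if proc.returncode == 0:
                return proc.stdout
        return ""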
2026-03-07T10:32:36.997 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm08.local
2026-03-07T10:32:36.997 DEBUG:teuthology.orchestra.run.vm08:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-07T10:32:37.059 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-07T10:32:37.059 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-07T10:32:37.081 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-07T10:32:37.081 DEBUG:teuthology.orchestra.run.vm08:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:32:37.101 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:32:37.501 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-07T10:32:37.502 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:32:37.503 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:32:37.522 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:32:37.522 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:32:37.523 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose/home/ubuntu/cephtest/archive/syslog/kern.log: -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:32:37.523 INFO:teuthology.orchestra.run.vm08.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-07T10:32:37.523 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-07T10:32:37.523 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:32:37.523 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:32:37.524 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-07T10:32:37.524 INFO:teuthology.orchestra.run.vm09.stderr: --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:32:37.524 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-07T10:32:37.643 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-07T10:32:37.658 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.1% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-07T10:32:37.660 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-07T10:32:37.663 INFO:teuthology.task.internal:Restoring /etc/sudoers...
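
The scrambled gzip lines in the two compression passes above are not corruption in the files, only in the logging: xargs --max-procs=0 starts one gzip per file in parallel, and they all share one stderr, so verbose prefixes and "NN% -- replaced with" fragments from different files interleave. A sketch that performs the same gzip -5 replacement but keeps each file's report intact:

    import gzip
    import os
    import shutil

    def gzip_replace(path: str, level: int = 5) -> str:
        """Compress path to path.gz and remove the original, like 'gzip -5'."""
        before = os.path.getsize(path)
        with open(path, "rb") as src, \
             gzip.open(path + ".gz", "wb", compresslevel=level) as dst:
            shutil.copyfileobj(src, dst)
        os.remove(path)
        after = os.path.getsize(path + ".gz")
        saved = 100.0 * (1 - after / before) if before else 0.0
        return f"{path}: {saved:.1f}% -- replaced with {path}.gz"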
2026-03-07T10:32:37.663 DEBUG:teuthology.orchestra.run.vm08:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-07T10:32:37.722 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-07T10:32:37.744 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-07T10:32:37.747 DEBUG:teuthology.orchestra.run.vm08:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:32:37.764 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:32:37.786 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = core
2026-03-07T10:32:37.808 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-07T10:32:37.820 DEBUG:teuthology.orchestra.run.vm08:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:32:37.850 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:32:37.850 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:32:37.872 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:32:37.872 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-07T10:32:37.875 INFO:teuthology.task.internal:Transferring archived files...
2026-03-07T10:32:37.875 DEBUG:teuthology.misc:Transferring archived files from vm08:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18/remote/vm08
2026-03-07T10:32:37.875 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-07T10:32:37.915 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/18/remote/vm09
2026-03-07T10:32:37.915 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-07T10:32:37.939 INFO:teuthology.task.internal:Removing archive directory...
2026-03-07T10:32:37.940 DEBUG:teuthology.orchestra.run.vm08:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-07T10:32:37.957 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-07T10:32:37.993 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-07T10:32:37.995 INFO:teuthology.task.internal:Not uploading archives.
2026-03-07T10:32:37.995 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-07T10:32:37.997 INFO:teuthology.task.internal:Tidying up after the test...
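
The coredump unwind resets kernel.core_pattern, deletes only cores that file(1) attributes to systemd-sysusers (treated as known noise), and prunes the directory if nothing is left; "test -e" returning 1 afterwards is the good outcome, meaning no unexplained cores survived. A local Python sketch of that pruning logic:

    import os
    import subprocess

    def prune_coredumps(dirpath: str) -> bool:
        """Drop systemd-sysusers cores; True when no cores remain."""
        if not os.path.isdir(dirpath):
            return True
        for root, _, files in os.walk(dirpath):
            for name in files:
                path = os.path.join(root, name)
                kind = subprocess.run(["file", path], capture_output=True,
                                      text=True).stdout
                if "systemd-sysusers" in kind:
                    os.remove(path)
        try:
            os.rmdir(dirpath)  # mirrors 'rmdir --ignore-fail-on-non-empty'
        except OSError:
            return False       # real cores remain and will be archived
        return True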
2026-03-07T10:32:37.997 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-07T10:32:38.011 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-07T10:32:38.024 INFO:teuthology.orchestra.run.vm08.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:32 /home/ubuntu/cephtest
2026-03-07T10:32:38.048 INFO:teuthology.orchestra.run.vm09.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:32 /home/ubuntu/cephtest
2026-03-07T10:32:38.049 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-07T10:32:38.054 INFO:teuthology.run:Summary data:
description: orch:cephadm:workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_extra_daemon_features}
duration: 353.87946939468384
owner: irq0
success: true

2026-03-07T10:32:38.054 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-07T10:32:38.074 INFO:teuthology.run:pass