2026-03-07T10:14:44.825 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-07T10:14:44.830 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-07T10:14:44.848 INFO:teuthology.run:Config: archive_path: /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13
branch: cobaltcore-storage-v19.2.3-fasttrack-5
description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite}
email: null
first_in_suite: false
flavor: default
job_id: '13'
last_in_suite: false
machine_type: vps
name: irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: cobaltcore-storage-v19.2.3-fasttrack-5
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 1
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: false
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - MON_DOWN
    - mons down
    - mon down
    - out of quorum
    - CEPHADM_STRAY_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
  install:
    ceph:
      flavor: default
      sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
    extra_system_packages:
      deb:
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-fasttrack-5-workunits
    sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b
owner: irq0
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - mon.a
  - mgr.a
  - osd.0
- - host.b
  - mon.b
  - mgr.b
  - osd.1
- - host.c
  - mon.c
  - osd.2
seed: 8363
sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
sleep_before_teardown: 0
subset: 1/64
suite: orch:cephadm:workunits
suite_branch: tt-fasttrack-5-workunits
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b
targets:
  vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFTOAzbeZZulUlWG8ciTYI14Ss0rUOy+FPMSLnTkO/r0Ul0pPEyBapIW47BZLPFjHgy07D3MvQWqVbuJIdmqj6o=
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC/yoKE07ZgK4pzA5XwKm8iaGYUWbuBuPXBEQhstho2fWIvGx2vuGCrD6hyqZW3LfU3u2q4hkAQs4mQvMU+Z6fw=
  vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJJ6F1HrlqiYqJV7bhpZ//3Geh0DPK0ve8Wqd54Sij5sf5FdzDazvf+orbWmwwdlp3H4fgAVeGP13iqQN5vguqE=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- cephadm: null
- cephadm.shell:
    host.a:
    - ceph mgr module enable rgw
- rgw_module.apply:
    specs:
    - rgw_realm: myrealm1
      rgw_zone: myzone1
      rgw_zonegroup: myzonegroup1
      spec:
        rgw_frontend_port: 5500
- cephadm.shell:
    host.a:
    - 'set -e

      set -x

      while true; do TOKEN=$(ceph rgw realm tokens | jq -r ''.[0].token''); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done

      TOKENS=$(ceph rgw realm tokens)

      echo $TOKENS | jq --exit-status ''.[0].realm == "myrealm1"''

      echo $TOKENS | jq --exit-status ''.[0].token''

      TOKEN_JSON=$(ceph rgw realm tokens | jq -r ''.[0].token'' | base64 --decode)

      echo $TOKEN_JSON | jq --exit-status ''.realm_name == "myrealm1"''

      echo $TOKEN_JSON | jq --exit-status ''.endpoint | test("http://.+:\\d+")''

      echo $TOKEN_JSON | jq --exit-status ''.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")''

      echo $TOKEN_JSON | jq --exit-status ''.access_key''

      echo $TOKEN_JSON | jq --exit-status ''.secret''

      '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-07_10:02:54
tube: vps
user: irq0
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.2764
2026-03-07T10:14:44.848 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa; will attempt to use it
2026-03-07T10:14:44.848 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa/tasks
2026-03-07T10:14:44.848 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-07T10:14:44.849 INFO:teuthology.task.internal:Saving configuration
2026-03-07T10:14:44.853 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-07T10:14:44.854 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-07T10:14:44.860 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:13:11.908308', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFTOAzbeZZulUlWG8ciTYI14Ss0rUOy+FPMSLnTkO/r0Ul0pPEyBapIW47BZLPFjHgy07D3MvQWqVbuJIdmqj6o='}
2026-03-07T10:14:44.864 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:13:11.908580', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC/yoKE07ZgK4pzA5XwKm8iaGYUWbuBuPXBEQhstho2fWIvGx2vuGCrD6hyqZW3LfU3u2q4hkAQs4mQvMU+Z6fw='}
2026-03-07T10:14:44.868 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm08.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:13:11.907680', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:08', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJJ6F1HrlqiYqJV7bhpZ//3Geh0DPK0ve8Wqd54Sij5sf5FdzDazvf+orbWmwwdlp3H4fgAVeGP13iqQN5vguqE='}
2026-03-07T10:14:44.869 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-07T10:14:44.869 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['host.a', 'mon.a', 'mgr.a', 'osd.0']
2026-03-07T10:14:44.869 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.b', 'mon.b', 'mgr.b', 'osd.1']
2026-03-07T10:14:44.869 INFO:teuthology.task.internal:roles: ubuntu@vm08.local - ['host.c', 'mon.c', 'osd.2']
2026-03-07T10:14:44.869 INFO:teuthology.run_tasks:Running task console_log...
2026-03-07T10:14:44.874 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding
2026-03-07T10:14:44.880 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-03-07T10:14:44.885 DEBUG:teuthology.task.console_log:vm08 does not support IPMI; excluding
2026-03-07T10:14:44.885 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f9c341e7ac0>, signals=[15])
2026-03-07T10:14:44.885 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-07T10:14:44.886 INFO:teuthology.task.internal:Opening connections...
2026-03-07T10:14:44.886 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local
2026-03-07T10:14:44.886 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:14:44.945 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-03-07T10:14:44.945 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:14:45.003 DEBUG:teuthology.task.internal:connecting to ubuntu@vm08.local
2026-03-07T10:14:45.003 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:14:45.060 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-07T10:14:45.061 DEBUG:teuthology.orchestra.run.vm03:> uname -m
2026-03-07T10:14:45.079 INFO:teuthology.orchestra.run.vm03.stdout:x86_64
2026-03-07T10:14:45.079 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-07T10:14:45.133 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-07T10:14:45.133 INFO:teuthology.lock.ops:Updating vm03.local on lock server
2026-03-07T10:14:45.138 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-03-07T10:14:45.153 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-03-07T10:14:45.153 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:NAME="CentOS Stream"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="9"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:ID="centos"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE="rhel fedora"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="9"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:PLATFORM_ID="platform:el9"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:ANSI_COLOR="0;31"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:LOGO="fedora-logo-icon"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://centos.org/"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-07T10:14:45.208 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-07T10:14:45.208 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-03-07T10:14:45.212 DEBUG:teuthology.orchestra.run.vm08:> uname -m
2026-03-07T10:14:45.226 INFO:teuthology.orchestra.run.vm08.stdout:x86_64
2026-03-07T10:14:45.226 DEBUG:teuthology.orchestra.run.vm08:> cat /etc/os-release
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:NAME="CentOS Stream"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:VERSION="9"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:ID="centos"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:ID_LIKE="rhel fedora"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:VERSION_ID="9"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:PLATFORM_ID="platform:el9"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:ANSI_COLOR="0;31"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:LOGO="fedora-logo-icon"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:HOME_URL="https://centos.org/"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-07T10:14:45.280 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-07T10:14:45.281 INFO:teuthology.lock.ops:Updating vm08.local on lock server
2026-03-07T10:14:45.285 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-07T10:14:45.286 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-07T10:14:45.287 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-07T10:14:45.287 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest
2026-03-07T10:14:45.289 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-03-07T10:14:45.290 DEBUG:teuthology.orchestra.run.vm08:> test '!' -e /home/ubuntu/cephtest
2026-03-07T10:14:45.335 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-07T10:14:45.336 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-07T10:14:45.336 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph)
2026-03-07T10:14:45.347 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-03-07T10:14:45.348 DEBUG:teuthology.orchestra.run.vm08:> test -z $(ls -A /var/lib/ceph)
2026-03-07T10:14:45.361 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-07T10:14:45.362 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-07T10:14:45.391 INFO:teuthology.orchestra.run.vm08.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-07T10:14:45.391 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-07T10:14:45.398 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready
2026-03-07T10:14:45.417 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:14:45.613 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-03-07T10:14:45.627 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:14:45.813 DEBUG:teuthology.orchestra.run.vm08:> test -e /ceph-qa-ready
2026-03-07T10:14:45.827 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:14:46.018 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-07T10:14:46.019 INFO:teuthology.task.internal:Creating test directory...
2026-03-07T10:14:46.019 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-07T10:14:46.021 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-07T10:14:46.023 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-07T10:14:46.040 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-07T10:14:46.042 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-07T10:14:46.043 INFO:teuthology.task.internal:Creating archive directory...
2026-03-07T10:14:46.043 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-07T10:14:46.079 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-07T10:14:46.083 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-07T10:14:46.103 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-07T10:14:46.104 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-07T10:14:46.104 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-07T10:14:46.148 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:14:46.148 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-07T10:14:46.167 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:14:46.167 DEBUG:teuthology.orchestra.run.vm08:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-07T10:14:46.181 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:14:46.181 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-07T10:14:46.191 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-07T10:14:46.210 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-07T10:14:46.212 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:14:46.222 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:14:46.240 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:14:46.250 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:14:46.250 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:14:46.261 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-07T10:14:46.262 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-07T10:14:46.263 INFO:teuthology.task.internal:Configuring sudo...
2026-03-07T10:14:46.263 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-07T10:14:46.265 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-07T10:14:46.293 DEBUG:teuthology.orchestra.run.vm08:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-07T10:14:46.327 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-07T10:14:46.329 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-07T10:14:46.329 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-07T10:14:46.333 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-07T10:14:46.360 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-07T10:14:46.380 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:14:46.413 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:14:46.472 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:14:46.473 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-07T10:14:46.530 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:14:46.554 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:14:46.613 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-07T10:14:46.613 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-07T10:14:46.675 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:14:46.697 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:14:46.753 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:14:46.753 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-07T10:14:46.811 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart
2026-03-07T10:14:46.813 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-03-07T10:14:46.815 DEBUG:teuthology.orchestra.run.vm08:> sudo service rsyslog restart
2026-03-07T10:14:46.838 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:14:46.844 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:14:46.877 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:14:47.315 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-07T10:14:47.317 INFO:teuthology.task.internal:Starting timer...
2026-03-07T10:14:47.317 INFO:teuthology.run_tasks:Running task pcp...
2026-03-07T10:14:47.319 INFO:teuthology.run_tasks:Running task selinux...
2026-03-07T10:14:47.321 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']}
2026-03-07T10:14:47.321 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported
2026-03-07T10:14:47.321 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-03-07T10:14:47.321 INFO:teuthology.task.selinux:Excluding vm08: VMs are not yet supported
2026-03-07T10:14:47.321 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-07T10:14:47.322 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-07T10:14:47.322 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-07T10:14:47.322 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-07T10:14:47.323 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-07T10:14:47.323 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-03-07T10:14:47.325 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-03-07T10:14:47.873 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-03-07T10:14:47.878 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-07T10:14:47.878 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory672osg9c --limit vm03.local,vm06.local,vm08.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-07T10:17:16.138 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm03.local'), Remote(name='ubuntu@vm06.local'), Remote(name='ubuntu@vm08.local')]
2026-03-07T10:17:16.139 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local'
2026-03-07T10:17:16.139 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:17:16.206 DEBUG:teuthology.orchestra.run.vm03:> true
2026-03-07T10:17:16.286 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local'
2026-03-07T10:17:16.287 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-03-07T10:17:16.287 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:17:16.349 DEBUG:teuthology.orchestra.run.vm06:> true
2026-03-07T10:17:16.433 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-03-07T10:17:16.433 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm08.local'
2026-03-07T10:17:16.433 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-07T10:17:16.498 DEBUG:teuthology.orchestra.run.vm08:> true
2026-03-07T10:17:16.580 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm08.local'
2026-03-07T10:17:16.580 INFO:teuthology.run_tasks:Running task clock...
2026-03-07T10:17:16.583 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-07T10:17:16.583 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-07T10:17:16.584 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:17:16.585 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-07T10:17:16.585 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:17:16.588 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-07T10:17:16.589 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:17:16.621 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-07T10:17:16.626 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-07T10:17:16.639 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-07T10:17:16.644 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-07T10:17:16.658 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-07T10:17:16.670 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found
2026-03-07T10:17:16.671 INFO:teuthology.orchestra.run.vm06.stderr:sudo: ntpd: command not found
2026-03-07T10:17:16.681 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-07T10:17:16.685 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon
2026-03-07T10:17:16.687 INFO:teuthology.orchestra.run.vm06.stdout:506 Cannot talk to daemon
2026-03-07T10:17:16.706 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-07T10:17:16.711 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-07T10:17:16.716 INFO:teuthology.orchestra.run.vm08.stderr:sudo: ntpd: command not found
2026-03-07T10:17:16.724 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-07T10:17:16.729 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-07T10:17:16.731 INFO:teuthology.orchestra.run.vm08.stdout:506 Cannot talk to daemon
2026-03-07T10:17:16.747 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-07T10:17:16.763 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-07T10:17:16.768 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:17:16.778 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:17:16.818 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:17:16.879 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:17:16.879 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-07T10:17:16.879 INFO:teuthology.orchestra.run.vm06.stdout:^? srv01.spectre-net.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.879 INFO:teuthology.orchestra.run.vm06.stdout:^? srv01-nc.securepod.org 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.879 INFO:teuthology.orchestra.run.vm06.stdout:^? netcup02.theravenhub.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.879 INFO:teuthology.orchestra.run.vm06.stdout:^? 139-162-156-95.ip.linode> 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm08.stdout:^? srv01-nc.securepod.org 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm08.stdout:^? srv01.spectre-net.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm08.stdout:^? netcup02.theravenhub.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm08.stdout:^? 139-162-156-95.ip.linode> 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm03.stdout:^? netcup02.theravenhub.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm03.stdout:^? 139-162-156-95.ip.linode> 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm03.stdout:^? srv01.spectre-net.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.orchestra.run.vm03.stdout:^? srv01-nc.securepod.org 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-03-07T10:17:16.880 INFO:teuthology.run_tasks:Running task pexec...
2026-03-07T10:17:16.883 INFO:teuthology.task.pexec:Executing custom commands...
2026-03-07T10:17:16.883 DEBUG:teuthology.orchestra.run.vm03:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-07T10:17:16.883 DEBUG:teuthology.orchestra.run.vm06:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-07T10:17:16.883 DEBUG:teuthology.orchestra.run.vm08:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-07T10:17:16.923 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo dnf remove nvme-cli -y
2026-03-07T10:17:16.923 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-07T10:17:16.923 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm08.local
2026-03-07T10:17:16.923 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-07T10:17:16.923 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-07T10:17:16.923 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf remove nvme-cli -y
2026-03-07T10:17:16.923 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-07T10:17:16.923 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm06.local
2026-03-07T10:17:16.923 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-07T10:17:16.923 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-07T10:17:16.923 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf remove nvme-cli -y
2026-03-07T10:17:16.924 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-07T10:17:16.924 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm03.local
2026-03-07T10:17:16.924 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-07T10:17:16.924 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-07T10:17:17.161 INFO:teuthology.orchestra.run.vm06.stdout:No match for argument: nvme-cli
2026-03-07T10:17:17.162 INFO:teuthology.orchestra.run.vm06.stderr:No packages marked for removal.
2026-03-07T10:17:17.168 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved.
2026-03-07T10:17:17.168 INFO:teuthology.orchestra.run.vm06.stdout:Nothing to do.
2026-03-07T10:17:17.170 INFO:teuthology.orchestra.run.vm06.stdout:Complete!
2026-03-07T10:17:17.175 INFO:teuthology.orchestra.run.vm08.stdout:No match for argument: nvme-cli
2026-03-07T10:17:17.175 INFO:teuthology.orchestra.run.vm08.stderr:No packages marked for removal.
2026-03-07T10:17:17.178 INFO:teuthology.orchestra.run.vm08.stdout:Dependencies resolved.
2026-03-07T10:17:17.179 INFO:teuthology.orchestra.run.vm08.stdout:Nothing to do.
2026-03-07T10:17:17.179 INFO:teuthology.orchestra.run.vm08.stdout:Complete!
2026-03-07T10:17:17.180 INFO:teuthology.orchestra.run.vm03.stdout:No match for argument: nvme-cli
2026-03-07T10:17:17.180 INFO:teuthology.orchestra.run.vm03.stderr:No packages marked for removal.
2026-03-07T10:17:17.183 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved.
2026-03-07T10:17:17.184 INFO:teuthology.orchestra.run.vm03.stdout:Nothing to do.
2026-03-07T10:17:17.184 INFO:teuthology.orchestra.run.vm03.stdout:Complete!
2026-03-07T10:17:17.631 INFO:teuthology.orchestra.run.vm08.stdout:Last metadata expiration check: 0:01:41 ago on Sat 07 Mar 2026 10:15:36 AM UTC.
2026-03-07T10:17:17.654 INFO:teuthology.orchestra.run.vm06.stdout:Last metadata expiration check: 0:01:42 ago on Sat 07 Mar 2026 10:15:35 AM UTC.
2026-03-07T10:17:17.702 INFO:teuthology.orchestra.run.vm03.stdout:Last metadata expiration check: 0:01:19 ago on Sat 07 Mar 2026 10:15:58 AM UTC.
2026-03-07T10:17:17.732 INFO:teuthology.orchestra.run.vm08.stdout:Dependencies resolved.
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: Package Architecture Version Repository Size
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Installing:
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Installing dependencies:
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Transaction Summary
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Install 6 Packages
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Total download size: 2.3 M
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Installed size: 11 M
2026-03-07T10:17:17.733 INFO:teuthology.orchestra.run.vm08.stdout:Downloading Packages:
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved.
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: Package Architecture Version Repository Size
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:Installing:
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:Installing dependencies:
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:Transaction Summary
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:Install 6 Packages
2026-03-07T10:17:17.775 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-07T10:17:17.776 INFO:teuthology.orchestra.run.vm06.stdout:Total download size: 2.3 M
2026-03-07T10:17:17.776 INFO:teuthology.orchestra.run.vm06.stdout:Installed size: 11 M
2026-03-07T10:17:17.776 INFO:teuthology.orchestra.run.vm06.stdout:Downloading Packages:
2026-03-07T10:17:17.824 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved.
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: Package Architecture Version Repository Size
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Installing:
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Installing dependencies:
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Transaction Summary
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Install 6 Packages
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Total download size: 2.3 M
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Installed size: 11 M
2026-03-07T10:17:17.825 INFO:teuthology.orchestra.run.vm03.stdout:Downloading Packages:
2026-03-07T10:17:18.018 INFO:teuthology.orchestra.run.vm08.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 314 kB/s | 44 kB 00:00
2026-03-07T10:17:18.019 INFO:teuthology.orchestra.run.vm06.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 395 kB/s | 44 kB 00:00
2026-03-07T10:17:18.042 INFO:teuthology.orchestra.run.vm08.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 441 kB/s | 72 kB 00:00
2026-03-07T10:17:18.045 INFO:teuthology.orchestra.run.vm06.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 526 kB/s | 72 kB 00:00
2026-03-07T10:17:18.112 INFO:teuthology.orchestra.run.vm03.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 339 kB/s | 44 kB 00:00
2026-03-07T10:17:18.112 INFO:teuthology.orchestra.run.vm03.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 553 kB/s | 72 kB 00:00
2026-03-07T10:17:18.136 INFO:teuthology.orchestra.run.vm06.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 722 kB/s | 84 kB 00:00
2026-03-07T10:17:18.140 INFO:teuthology.orchestra.run.vm08.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 695 kB/s | 84 kB 00:00
2026-03-07T10:17:18.142 INFO:teuthology.orchestra.run.vm08.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.5 MB/s | 150 kB 00:00
2026-03-07T10:17:18.148 INFO:teuthology.orchestra.run.vm06.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.4 MB/s | 150 kB 00:00
2026-03-07T10:17:18.171 INFO:teuthology.orchestra.run.vm06.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 4.4 MB/s | 1.2 MB 00:00
2026-03-07T10:17:18.171 INFO:teuthology.orchestra.run.vm08.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 3.9 MB/s | 1.2 MB 00:00
2026-03-07T10:17:18.177 INFO:teuthology.orchestra.run.vm03.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 1.3 MB/s | 84 kB 00:00
2026-03-07T10:17:18.179 INFO:teuthology.orchestra.run.vm03.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.2 MB/s | 150 kB 00:00
2026-03-07T10:17:18.207 INFO:teuthology.orchestra.run.vm06.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 12 MB/s | 837 kB 00:00
2026-03-07T10:17:18.209 INFO:teuthology.orchestra.run.vm06.stdout:--------------------------------------------------------------------------------
2026-03-07T10:17:18.209 INFO:teuthology.orchestra.run.vm06.stdout:Total 5.3 MB/s | 2.3 MB 00:00
2026-03-07T10:17:18.242 INFO:teuthology.orchestra.run.vm03.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 4.4 MB/s | 1.2 MB 00:00
2026-03-07T10:17:18.280 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction check
2026-03-07T10:17:18.283 INFO:teuthology.orchestra.run.vm03.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 8.0 MB/s | 837 kB 00:00
2026-03-07T10:17:18.283 INFO:teuthology.orchestra.run.vm03.stdout:--------------------------------------------------------------------------------
2026-03-07T10:17:18.283 INFO:teuthology.orchestra.run.vm03.stdout:Total 5.1 MB/s | 2.3 MB 00:00
2026-03-07T10:17:18.290 INFO:teuthology.orchestra.run.vm08.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 5.5 MB/s | 837 kB 00:00
2026-03-07T10:17:18.290 INFO:teuthology.orchestra.run.vm08.stdout:--------------------------------------------------------------------------------
2026-03-07T10:17:18.290 INFO:teuthology.orchestra.run.vm08.stdout:Total 4.2 MB/s | 2.3 MB 00:00
2026-03-07T10:17:18.292 INFO:teuthology.orchestra.run.vm06.stdout:Transaction check succeeded.
2026-03-07T10:17:18.292 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction test
2026-03-07T10:17:18.348 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction check
2026-03-07T10:17:18.356 INFO:teuthology.orchestra.run.vm06.stdout:Transaction test succeeded.
2026-03-07T10:17:18.356 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction
2026-03-07T10:17:18.357 INFO:teuthology.orchestra.run.vm03.stdout:Transaction check succeeded.
2026-03-07T10:17:18.357 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction test
2026-03-07T10:17:18.376 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction check
2026-03-07T10:17:18.381 INFO:teuthology.orchestra.run.vm08.stdout:Transaction check succeeded.
2026-03-07T10:17:18.381 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction test
2026-03-07T10:17:18.413 INFO:teuthology.orchestra.run.vm03.stdout:Transaction test succeeded.
2026-03-07T10:17:18.413 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction
2026-03-07T10:17:18.444 INFO:teuthology.orchestra.run.vm08.stdout:Transaction test succeeded.
2026-03-07T10:17:18.444 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction
2026-03-07T10:17:18.548 INFO:teuthology.orchestra.run.vm06.stdout: Preparing : 1/1
2026-03-07T10:17:18.564 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-07T10:17:18.572 INFO:teuthology.orchestra.run.vm03.stdout: Preparing : 1/1
2026-03-07T10:17:18.575 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-07T10:17:18.582 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-07T10:17:18.584 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-07T10:17:18.590 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-07T10:17:18.592 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-07T10:17:18.596 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-07T10:17:18.605 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-07T10:17:18.613 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-07T10:17:18.615 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-07T10:17:18.625 INFO:teuthology.orchestra.run.vm08.stdout: Preparing : 1/1
2026-03-07T10:17:18.637 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-07T10:17:18.652 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-07T10:17:18.663 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-07T10:17:18.671 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-07T10:17:18.677 INFO:teuthology.orchestra.run.vm08.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-07T10:17:18.741 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-07T10:17:18.746 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-07T10:17:18.775 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-07T10:17:18.780 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-07T10:17:18.873 INFO:teuthology.orchestra.run.vm08.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-07T10:17:18.878 INFO:teuthology.orchestra.run.vm08.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-07T10:17:19.151 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-07T10:17:19.152 INFO:teuthology.orchestra.run.vm03.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-07T10:17:19.152 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:17:19.169 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-07T10:17:19.169 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-07T10:17:19.169 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-07T10:17:19.282 INFO:teuthology.orchestra.run.vm08.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-07T10:17:19.282 INFO:teuthology.orchestra.run.vm08.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-07T10:17:19.282 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:17:19.709 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-07T10:17:19.709 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-07T10:17:19.709 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-07T10:17:19.709 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-07T10:17:19.709 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-07T10:17:19.753 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-07T10:17:19.753 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-07T10:17:19.753 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-07T10:17:19.753 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-07T10:17:19.753 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout:Installed:
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:17:19.795 INFO:teuthology.orchestra.run.vm03.stdout:Complete!
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout:Installed:
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-07T10:17:19.871 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-07T10:17:19.872 INFO:teuthology.orchestra.run.vm06.stdout:Complete!
2026-03-07T10:17:19.878 DEBUG:teuthology.parallel:result is None
2026-03-07T10:17:19.903 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-07T10:17:19.903 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-07T10:17:19.903 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-07T10:17:19.903 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-07T10:17:19.903 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-07T10:17:19.928 DEBUG:teuthology.parallel:result is None
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout:Installed:
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:17:20.002 INFO:teuthology.orchestra.run.vm08.stdout:Complete!
2026-03-07T10:17:20.077 DEBUG:teuthology.parallel:result is None
2026-03-07T10:17:20.077 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-07T10:17:20.077 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-07T10:17:20.123 INFO:tasks.cephadm:Config: {'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': False}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'MON_DOWN', 'mons down', 'mon down', 'out of quorum', 'CEPHADM_STRAY_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '340d3c24fc6ae7529322dc7ccee6c6cb2589da0a', 'cephadm_binary_url': 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5'}}
2026-03-07T10:17:20.123 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is
2026-03-07T10:17:20.123 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
2026-03-07T10:17:20.123 INFO:tasks.cephadm:Cluster fsid is d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:17:20.123 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-07T10:17:20.124 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.103', 'mon.b': '192.168.123.106', 'mon.c': '192.168.123.108'}
2026-03-07T10:17:20.124 INFO:tasks.cephadm:First mon is mon.a on vm03
2026-03-07T10:17:20.124 INFO:tasks.cephadm:First mgr is a
2026-03-07T10:17:20.124 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-07T10:17:20.124 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s)
2026-03-07T10:17:20.170 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s)
2026-03-07T10:17:20.208 DEBUG:teuthology.orchestra.run.vm08:> sudo hostname $(hostname -s)
2026-03-07T10:17:20.260 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
2026-03-07T10:17:20.260 DEBUG:teuthology.orchestra.run.vm03:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:21.423 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 7 10:17 /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:21.423 DEBUG:teuthology.orchestra.run.vm06:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:22.502 INFO:teuthology.orchestra.run.vm06.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 7 10:17 /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:22.502 DEBUG:teuthology.orchestra.run.vm08:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:23.562 INFO:teuthology.orchestra.run.vm08.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 7 10:17 /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:23.562 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:23.583 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-07T10:17:23.605 DEBUG:teuthology.orchestra.run.vm08:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
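Each node fetches the standalone cephadm binary and sanity-checks it before marking it executable. The same sequence as a single reusable sketch, with the URL and path copied from the commands above (the size check presumably guards against an error page or truncated download landing in the file):

    url=https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    dst=/home/ubuntu/cephtest/cephadm
    curl --silent -L "$url" > "$dst"
    # refuse an empty or implausibly small file before making it executable
    test -s "$dst" && test "$(stat -c%s "$dst")" -gt 1000
    chmod +x "$dst"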
2026-03-07T10:17:23.630 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 on all hosts...
2026-03-07T10:17:23.630 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-07T10:17:23.633 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-07T10:17:23.648 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-07T10:17:23.810 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:17:23.886 INFO:teuthology.orchestra.run.vm08.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:17:23.896 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout:{
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout: "repo_digests": [
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout: ]
2026-03-07T10:18:04.071 INFO:teuthology.orchestra.run.vm06.stdout:}
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout:{
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout: ]
2026-03-07T10:18:04.087 INFO:teuthology.orchestra.run.vm03.stdout:}
2026-03-07T10:18:05.228 INFO:teuthology.orchestra.run.vm08.stdout:{
2026-03-07T10:18:05.229 INFO:teuthology.orchestra.run.vm08.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-07T10:18:05.229 INFO:teuthology.orchestra.run.vm08.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-07T10:18:05.229 INFO:teuthology.orchestra.run.vm08.stdout: "repo_digests": [
2026-03-07T10:18:05.229 INFO:teuthology.orchestra.run.vm08.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-07T10:18:05.229 INFO:teuthology.orchestra.run.vm08.stdout: ]
2026-03-07T10:18:05.229 INFO:teuthology.orchestra.run.vm08.stdout:}
2026-03-07T10:18:05.255 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph
2026-03-07T10:18:05.286 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph
2026-03-07T10:18:05.317 DEBUG:teuthology.orchestra.run.vm08:> sudo mkdir -p /etc/ceph
2026-03-07T10:18:05.353 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph
2026-03-07T10:18:05.382 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph
2026-03-07T10:18:05.415 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 777 /etc/ceph
2026-03-07T10:18:05.447 INFO:tasks.cephadm:Writing seed config...
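The pull on each host above reports the resolved image as JSON (ceph_version, image_id, repo_digests). A hypothetical one-liner to pin the immutable digest reference from that report, assuming jq is available on the node:

    image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
    # extract the first repo digest from cephadm's pull output
    sudo /home/ubuntu/cephtest/cephadm --image "$image" pull | jq -r '.repo_digests[0]'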
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = False
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-07T10:18:05.447 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-07T10:18:05.448 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:18:05.448 DEBUG:teuthology.orchestra.run.vm03:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-07T10:18:05.467 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = d33fdf60-1a0e-11f1-a719-83e365122cb4
mon election default strategy = 1

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = False

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-07T10:18:05.468 DEBUG:teuthology.orchestra.run.vm03:mon.a> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a.service
2026-03-07T10:18:05.512 DEBUG:teuthology.orchestra.run.vm03:mgr.a> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service
2026-03-07T10:18:05.555 INFO:tasks.cephadm:Bootstrapping...
2026-03-07T10:18:05.555 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 -v bootstrap --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id a --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.103 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:18:05.719 INFO:teuthology.orchestra.run.vm03.stdout:--------------------------------------------------------------------------------
2026-03-07T10:18:05.720 INFO:teuthology.orchestra.run.vm03.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5', '-v', 'bootstrap', '--fsid', 'd33fdf60-1a0e-11f1-a719-83e365122cb4', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'a', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.103', '--skip-admin-label']
2026-03-07T10:18:05.720 INFO:teuthology.orchestra.run.vm03.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-03-07T10:18:05.720 INFO:teuthology.orchestra.run.vm03.stdout:Verifying podman|docker is present...
2026-03-07T10:18:05.747 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0
2026-03-07T10:18:05.747 INFO:teuthology.orchestra.run.vm03.stdout:Verifying lvm2 is present...
2026-03-07T10:18:05.747 INFO:teuthology.orchestra.run.vm03.stdout:Verifying time synchronization is in place...
2026-03-07T10:18:05.755 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-07T10:18:05.756 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-07T10:18:05.762 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-07T10:18:05.762 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive
2026-03-07T10:18:05.770 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled
2026-03-07T10:18:05.777 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active
2026-03-07T10:18:05.777 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running
2026-03-07T10:18:05.777 INFO:teuthology.orchestra.run.vm03.stdout:Repeating the final host check...
2026-03-07T10:18:05.799 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0
2026-03-07T10:18:05.799 INFO:teuthology.orchestra.run.vm03.stdout:podman (/bin/podman) version 5.8.0 is present
2026-03-07T10:18:05.799 INFO:teuthology.orchestra.run.vm03.stdout:systemctl is present
2026-03-07T10:18:05.799 INFO:teuthology.orchestra.run.vm03.stdout:lvcreate is present
2026-03-07T10:18:05.807 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-07T10:18:05.807 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-07T10:18:05.815 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-07T10:18:05.815 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive
2026-03-07T10:18:05.823 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled
2026-03-07T10:18:05.830 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active
2026-03-07T10:18:05.830 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running
2026-03-07T10:18:05.830 INFO:teuthology.orchestra.run.vm03.stdout:Host looks OK
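The host check above probes chrony.service first (absent: exit codes 1 and 3) before finding chronyd.service enabled and active. The same probe pattern as a standalone sketch, with the unit names taken from the log:

    for unit in chrony.service chronyd.service; do
        # is-enabled/is-active return non-zero for unknown or stopped units
        if systemctl is-enabled "$unit" >/dev/null 2>&1 && systemctl is-active "$unit" >/dev/null 2>&1; then
            echo "time sync provided by $unit"
            break
        fi
    done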
2026-03-07T10:18:05.830 INFO:teuthology.orchestra.run.vm03.stdout:Cluster fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:05.831 INFO:teuthology.orchestra.run.vm03.stdout:Acquiring lock 139714521433856 on /run/cephadm/d33fdf60-1a0e-11f1-a719-83e365122cb4.lock
2026-03-07T10:18:05.831 INFO:teuthology.orchestra.run.vm03.stdout:Lock 139714521433856 acquired on /run/cephadm/d33fdf60-1a0e-11f1-a719-83e365122cb4.lock
2026-03-07T10:18:05.831 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 3300 ...
2026-03-07T10:18:05.832 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 6789 ...
2026-03-07T10:18:05.832 INFO:teuthology.orchestra.run.vm03.stdout:Base mon IP(s) is [192.168.123.103:3300, 192.168.123.103:6789], mon addrv is [v2:192.168.123.103:3300,v1:192.168.123.103:6789]
2026-03-07T10:18:05.836 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100
2026-03-07T10:18:05.836 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100
2026-03-07T10:18:05.840 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-03-07T10:18:05.840 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-07T10:18:05.843 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-07T10:18:05.843 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 ::1/128 scope host
2026-03-07T10:18:05.843 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-07T10:18:05.843 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000
2026-03-07T10:18:05.843 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:3/64 scope link noprefixroute
2026-03-07T10:18:05.844 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-07T10:18:05.844 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24`
2026-03-07T10:18:05.844 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24`
2026-03-07T10:18:05.844 INFO:teuthology.orchestra.run.vm03.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24']
2026-03-07T10:18:05.845 INFO:teuthology.orchestra.run.vm03.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-07T10:18:05.845 INFO:teuthology.orchestra.run.vm03.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:18:06.653 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-07T10:18:06.653 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-07T10:18:06.653 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Getting image source signatures
2026-03-07T10:18:06.653 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:89f108f95c9b33ae21c5514f17c1bd5ca646e21d3c5e8ac1e117cf65bcd40261
2026-03-07T10:18:06.653 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying config sha256:8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-07T10:18:06.653 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Writing manifest to image destination
2026-03-07T10:18:06.916 INFO:teuthology.orchestra.run.vm03.stdout:ceph: stdout ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)
2026-03-07T10:18:06.916 INFO:teuthology.orchestra.run.vm03.stdout:Ceph version: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)
2026-03-07T10:18:06.916 INFO:teuthology.orchestra.run.vm03.stdout:Extracting ceph user uid/gid from container image...
2026-03-07T10:18:07.023 INFO:teuthology.orchestra.run.vm03.stdout:stat: stdout 167 167
2026-03-07T10:18:07.023 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial keys...
2026-03-07T10:18:07.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQBf+6tppEwcBRAAj0dzNYrukP9aFGzZ1gTe6w==
2026-03-07T10:18:07.246 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQBf+6tpHkOSDBAAElRJhveg154zK6Ignz/rFg==
2026-03-07T10:18:07.360 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQBf+6tpuGXCExAAYXPrxgCom3b7Sn2HURwagA==
2026-03-07T10:18:07.360 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial monmap...
2026-03-07T10:18:07.480 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-07T10:18:07.480 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy
2026-03-07T10:18:07.480 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:07.480 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-07T10:18:07.481 INFO:teuthology.orchestra.run.vm03.stdout:monmaptool for a [v2:192.168.123.103:3300,v1:192.168.123.103:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-07T10:18:07.481 INFO:teuthology.orchestra.run.vm03.stdout:setting min_mon_release = quincy
2026-03-07T10:18:07.481 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: set fsid to d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:07.481 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-07T10:18:07.481 INFO:teuthology.orchestra.run.vm03.stdout:
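The monmap lines above come from monmaptool run inside the bootstrap container. A hedged reconstruction of the invocation, pieced together from the output (the literal command is not shown in the log; the flags are standard monmaptool options, but verify against your cephadm version):

    monmaptool --create --clobber \
        --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 \
        --addv a '[v2:192.168.123.103:3300,v1:192.168.123.103:6789]' \
        /tmp/monmap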
2026-03-07T10:18:07.481 INFO:teuthology.orchestra.run.vm03.stdout:Creating mon...
2026-03-07T10:18:08.558 INFO:teuthology.orchestra.run.vm03.stdout:create mon.a on
2026-03-07T10:18:08.917 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-07T10:18:09.101 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4.target → /etc/systemd/system/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4.target.
2026-03-07T10:18:09.101 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4.target → /etc/systemd/system/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4.target.
2026-03-07T10:18:09.288 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a
2026-03-07T10:18:09.288 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a.service: Unit ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a.service not loaded.
2026-03-07T10:18:09.461 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4.target.wants/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a.service → /etc/systemd/system/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@.service.
2026-03-07T10:18:09.669 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present
2026-03-07T10:18:09.669 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available
2026-03-07T10:18:09.669 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon to start...
2026-03-07T10:18:09.669 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon...
2026-03-07T10:18:10.021 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout cluster:
2026-03-07T10:18:10.021 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout id: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:10.021 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout services:
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum a (age 0.263748s)
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout data:
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pgs:
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:mon is available
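"Waiting for mon..." is a poll against ceph status until the new monitor answers, at which point bootstrap logs "mon is available". A minimal equivalent of that loop, assuming an admin config and keyring are already in place (illustrative; cephadm does this internally):

    # retry until the mon responds; each attempt shows up in the mon log
    # as a client.admin "status" dispatch
    until sudo ceph status >/dev/null 2>&1; do
        sleep 1
    done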
2026-03-07T10:18:10.022 INFO:teuthology.orchestra.run.vm03.stdout:Assimilating anything we can from ceph.conf...
2026-03-07T10:18:10.362 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:10.362 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global]
2026-03-07T10:18:10.362 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:10.362 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-07T10:18:10.362 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789]
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/cephadm/use_agent = False
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd]
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-07T10:18:10.363 INFO:teuthology.orchestra.run.vm03.stdout:Generating new minimal ceph.conf...
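The two steps logged above ("Assimilating..." and "Generating new minimal ceph.conf...") correspond to standard mon config commands; a sketch of the pair (the log shows their effect, not the literal invocation inside the container):

    # fold loose ceph.conf options into the cluster's central config store
    sudo ceph config assimilate-conf -i /etc/ceph/ceph.conf
    # then emit the minimal client config (fsid + mon_host) that replaces it
    sudo ceph config generate-minimal-conf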
2026-03-07T10:18:11.009 INFO:teuthology.orchestra.run.vm03.stdout:Restarting the monitor...
2026-03-07T10:18:11.419 INFO:teuthology.orchestra.run.vm03.stdout:Setting public_network to 192.168.123.0/24 in mon config section
2026-03-07T10:18:11.770 INFO:teuthology.orchestra.run.vm03.stdout:Wrote config to /etc/ceph/ceph.conf
2026-03-07T10:18:11.772 INFO:teuthology.orchestra.run.vm03.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:18:11.772 INFO:teuthology.orchestra.run.vm03.stdout:Creating mgr...
2026-03-07T10:18:11.772 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:9283 ...
2026-03-07T10:18:11.773 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8765 ...
2026-03-07T10:18:11.960 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a
2026-03-07T10:18:11.960 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service: Unit ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service not loaded.
2026-03-07T10:18:12.117 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4.target.wants/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service → /etc/systemd/system/ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@.service.
2026-03-07T10:18:12.299 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present
2026-03-07T10:18:12.299 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available
2026-03-07T10:18:12.299 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present
2026-03-07T10:18:12.299 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[9283, 8765]>. firewalld.service is not available
2026-03-07T10:18:12.299 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr to start...
2026-03-07T10:18:12.299 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr...
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "d33fdf60-1a0e-11f1-a719-83e365122cb4",
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a"
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 1,
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:12.665 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:18:09:707778+0000",
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:18:09.708446+0000",
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:12.666 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (1/15)...
[2026-03-07T10:18:15.218-15.221: full "ceph status" JSON repeated on vm03 stdout, identical to the 10:18:12.665 dump above except "quorum_age": 3 - elided]
2026-03-07T10:18:15.221 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (2/15)...
2026-03-07T10:18:15.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:15 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/1329488373' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
[2026-03-07T10:18:17.546-17.548: full "ceph status" JSON repeated on vm03 stdout, identical to the 10:18:12.665 dump above except "quorum_age": 6 - elided]
2026-03-07T10:18:17.548 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (3/15)...
2026-03-07T10:18:17.794 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:17 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/497585882' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-07T10:18:17.794 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:17 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:17.791+0000 7fd3688bb100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-07T10:18:18.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:17 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:17.922+0000 7fd3688bb100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-07T10:18:18.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:18 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:18.427+0000 7fd3688bb100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-07T10:18:18.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:18 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:18.577+0000 7fd3688bb100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-07T10:18:19.615 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:19 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:19.294+0000 7fd3688bb100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
[2026-03-07T10:18:19.886-19.887: full "ceph status" JSON repeated on vm03 stdout, identical to the 10:18:12.665 dump above except "quorum_age": 8 - elided]
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:18:09:707778+0000",
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:18:09.708446+0000",
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:19.887 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (4/15)...
2026-03-07T10:18:20.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:19 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3239173815' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-07T10:18:20.632 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:20 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:20.374+0000 7fd3688bb100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-07T10:18:20.632 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:20 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:20.499+0000 7fd3688bb100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-07T10:18:20.632 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:20 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:20.629+0000 7fd3688bb100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-07T10:18:21.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:20 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:20.885+0000 7fd3688bb100 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-07T10:18:21.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:21 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:21.006+0000 7fd3688bb100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-07T10:18:21.637 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:21 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:21.302+0000 7fd3688bb100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-07T10:18:21.913 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:21 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:21.634+0000 7fd3688bb100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:18:22.179 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:22.011+0000 7fd3688bb100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-07T10:18:22.179 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:22.135+0000 7fd3688bb100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "d33fdf60-1a0e-11f1-a719-83e365122cb4",
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a"
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 10,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-07T10:18:22.199 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:18:09:707778+0000",
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:18:09.708446+0000",
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:22.200 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (5/15)...
2026-03-07T10:18:22.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: Activating manager daemon a
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: mgrmap e2: a(active, starting, since 0.00434833s)
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: Manager daemon a is now available
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/882125407' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a'
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a'
2026-03-07T10:18:22.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:22 vm03 ceph-mon[50786]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a'
2026-03-07T10:18:24.261 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:24 vm03 ceph-mon[50786]: mgrmap e3: a(active, since 1.00908s)
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "d33fdf60-1a0e-11f1-a719-83e365122cb4",
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-07T10:18:24.587 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a"
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 13,
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-07T10:18:24.588 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:18:09:707778+0000",
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:18:09.708446+0000",
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:24.589 INFO:teuthology.orchestra.run.vm03.stdout:mgr is available
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global]
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789]
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd]
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-07T10:18:24.923 INFO:teuthology.orchestra.run.vm03.stdout:Enabling cephadm module...
2026-03-07T10:18:25.152 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:25 vm03 ceph-mon[50786]: mgrmap e4: a(active, since 2s)
2026-03-07T10:18:25.152 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:25 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/360375697' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-07T10:18:25.152 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:25 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3487093848' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-07T10:18:25.152 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:25 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3487093848' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
2026-03-07T10:18:26.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:26 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: ignoring --setuser ceph since I am not root
2026-03-07T10:18:26.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:26 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: ignoring --setgroup ceph since I am not root
2026-03-07T10:18:26.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:26 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/4245075995' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 5,
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "a",
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart...
2026-03-07T10:18:26.636 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 5...
2026-03-07T10:18:26.711 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:26 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:26.456+0000 7fe5e7a15100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-07T10:18:26.711 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:26 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:26.589+0000 7fe5e7a15100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-07T10:18:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:27 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/4245075995' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-03-07T10:18:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:27 vm03 ceph-mon[50786]: mgrmap e5: a(active, since 4s)
2026-03-07T10:18:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:27 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/767823830' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-07T10:18:28.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:27 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:27.693+0000 7fe5e7a15100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-07T10:18:28.897 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:28 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:28.532+0000 7fe5e7a15100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-07T10:18:28.898 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:28 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:28.652+0000 7fe5e7a15100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-07T10:18:29.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:28 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:28.895+0000 7fe5e7a15100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-07T10:18:31.021 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:30 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:30.680+0000 7fe5e7a15100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-07T10:18:31.290 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:31 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:31.018+0000 7fe5e7a15100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:18:31.290 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:31 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:31.161+0000 7fe5e7a15100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-07T10:18:31.556 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:31 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:31.287+0000 7fe5e7a15100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-07T10:18:31.556 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:31 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:31.429+0000 7fe5e7a15100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-07T10:18:31.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:31 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:31.553+0000 7fe5e7a15100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-07T10:18:32.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:32 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:32.063+0000 7fe5e7a15100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-07T10:18:32.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:32 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:32.213+0000 7fe5e7a15100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-07T10:18:33.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:32 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:32.932+0000 7fe5e7a15100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-07T10:18:34.227 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:33 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:33.968+0000 7fe5e7a15100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-07T10:18:34.227 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:34 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:34.095+0000 7fe5e7a15100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-07T10:18:34.488 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:34 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:34.224+0000 7fe5e7a15100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-07T10:18:34.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:34 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:34.485+0000 7fe5e7a15100 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-07T10:18:34.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:34 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:34.606+0000 7fe5e7a15100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-07T10:18:35.253 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:34 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:34.910+0000 7fe5e7a15100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-07T10:18:35.623 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:35 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:35.250+0000 7fe5e7a15100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:18:35.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:35 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:35.620+0000 7fe5e7a15100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-07T10:18:35.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:35 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:35.739+0000 7fe5e7a15100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-07T10:18:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:36 vm03 ceph-mon[50786]: Active manager daemon a restarted
2026-03-07T10:18:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:36 vm03 ceph-mon[50786]: Activating manager daemon a
2026-03-07T10:18:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:36 vm03 ceph-mon[50786]: osdmap e2: 0 total, 0 up, 0 in
2026-03-07T10:18:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:36 vm03 ceph-mon[50786]: mgrmap e6: a(active, starting, since 0.285867s)
2026-03-07T10:18:37.086 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:37.086 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7,
2026-03-07T10:18:37.086 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-07T10:18:37.086 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:37.086 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 5 is available
2026-03-07T10:18:37.086 INFO:teuthology.orchestra.run.vm03.stdout:Setting orchestrator backend to cephadm...
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: Manager daemon a is now available
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: Found migration_current of "None". Setting to last migration.
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:18:37.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:18:37.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:37.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:37 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:37.843 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout value unchanged
2026-03-07T10:18:37.844 INFO:teuthology.orchestra.run.vm03.stdout:Generating ssh key...
2026-03-07T10:18:38.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-mon[50786]: mgrmap e7: a(active, since 1.30239s)
2026-03-07T10:18:38.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-mon[50786]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-07T10:18:38.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-mon[50786]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-07T10:18:38.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:38.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:38.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:38.661 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: Generating public/private rsa key pair.
2026-03-07T10:18:38.661 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: Your identification has been saved in /tmp/tmpdpz7xq1x/key
2026-03-07T10:18:38.661 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: Your public key has been saved in /tmp/tmpdpz7xq1x/key.pub
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: The key fingerprint is:
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: SHA256:idzuG8JLzXWpBjWCvRYFlTjLBGw9xsvrdYFT+SCSx8s ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: The key's randomart image is:
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: +---[RSA 3072]----+
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | ..+.*o. . |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | oo@.= + |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | ..=+Oo= o |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | . o=*E.... |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | o S...o. |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | . =.o.o. |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | +.=.o. |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | . +.o |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: | . o. |
2026-03-07T10:18:38.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:38 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: +----[SHA256]-----+
2026-03-07T10:18:38.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:38.713 INFO:teuthology.orchestra.run.vm03.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:18:38.713 INFO:teuthology.orchestra.run.vm03.stdout:Adding key to root@localhost authorized_keys...
2026-03-07T10:18:38.714 INFO:teuthology.orchestra.run.vm03.stdout:Adding host vm03...
2026-03-07T10:18:39.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:39.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:37] ENGINE Bus STARTING
2026-03-07T10:18:39.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:37] ENGINE Serving on https://192.168.123.103:7150
2026-03-07T10:18:39.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:37] ENGINE Client ('192.168.123.103', 44192) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:37] ENGINE Serving on http://192.168.123.103:8765
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:37] ENGINE Bus STARTED
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: Generating ssh key...
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:39.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:39 vm03 ceph-mon[50786]: mgrmap e8: a(active, since 2s)
2026-03-07T10:18:40.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:40 vm03 ceph-mon[50786]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:40.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:40 vm03 ceph-mon[50786]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "addr": "192.168.123.103", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:40.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:40 vm03 ceph-mon[50786]: Deploying cephadm binary to vm03
2026-03-07T10:18:41.372 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Added host 'vm03' with addr '192.168.123.103'
2026-03-07T10:18:41.372 INFO:teuthology.orchestra.run.vm03.stdout:Deploying unmanaged mon service...
2026-03-07T10:18:41.748 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-03-07T10:18:41.748 INFO:teuthology.orchestra.run.vm03.stdout:Deploying unmanaged mgr service...
2026-03-07T10:18:42.129 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-03-07T10:18:42.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:42.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: Added host vm03
2026-03-07T10:18:42.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:42.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:42.334 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: Saving service mon spec with placement count:5
2026-03-07T10:18:42.334 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:42.334 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:42.334 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: Saving service mgr spec with placement count:2
2026-03-07T10:18:42.334 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:42 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:42.928 INFO:teuthology.orchestra.run.vm03.stdout:Enabling the dashboard module...
2026-03-07T10:18:43.482 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3081026258' entity='client.admin'
2026-03-07T10:18:43.482 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:43.482 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3173881785' entity='client.admin'
2026-03-07T10:18:43.482 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:43.482 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/2109935597' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-07T10:18:43.482 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:44.203 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: ignoring --setuser ceph since I am not root
2026-03-07T10:18:44.204 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:43 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: ignoring --setgroup ceph since I am not root
2026-03-07T10:18:44.456 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:44 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:44.322+0000 7f2da38ec100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-07T10:18:44.456 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:44 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:44.452+0000 7f2da38ec100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 9,
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "a",
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart...
2026-03-07T10:18:44.523 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 9...
2026-03-07T10:18:45.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:44 vm03 ceph-mon[50786]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:18:45.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:44 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/2109935597' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-07T10:18:45.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:44 vm03 ceph-mon[50786]: mgrmap e9: a(active, since 8s)
2026-03-07T10:18:45.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:44 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/2790889367' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-07T10:18:45.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:45 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:45.577+0000 7f2da38ec100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member
2026-03-07T10:18:46.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:46 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:46.395+0000 7f2da38ec100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member
2026-03-07T10:18:46.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:46 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:46.509+0000 7f2da38ec100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member
2026-03-07T10:18:47.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:46 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:46.737+0000 7f2da38ec100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member
2026-03-07T10:18:48.768 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:48 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:48.449+0000 7f2da38ec100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member
2026-03-07T10:18:49.140 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:48 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:48.765+0000 7f2da38ec100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:18:49.140 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:48 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:48.892+0000 7f2da38ec100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member
2026-03-07T10:18:49.140 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:49 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:49.006+0000 7f2da38ec100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member
2026-03-07T10:18:49.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:49 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:49.137+0000 7f2da38ec100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member
2026-03-07T10:18:49.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:49 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:49.253+0000 7f2da38ec100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member
2026-03-07T10:18:50.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:49 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:49.737+0000 7f2da38ec100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member
2026-03-07T10:18:50.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:49 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:49.879+0000 7f2da38ec100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member
2026-03-07T10:18:50.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:50 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:50.556+0000 7f2da38ec100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member
2026-03-07T10:18:51.830 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:51 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:51.569+0000 7f2da38ec100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member
2026-03-07T10:18:51.830 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:51 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:51.694+0000 7f2da38ec100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member
2026-03-07T10:18:52.085 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:51 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:51.827+0000 7f2da38ec100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member
2026-03-07T10:18:52.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:52 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:52.082+0000 7f2da38ec100 -1 mgr[py] Module status has missing NOTIFY_TYPES member
2026-03-07T10:18:52.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:52 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:52.207+0000 7f2da38ec100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member
2026-03-07T10:18:52.845 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:52 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:52.507+0000 7f2da38ec100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member
2026-03-07T10:18:53.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:52 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:52.842+0000 7f2da38ec100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:53.188+0000 7f2da38ec100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:18:53.305+0000 7f2da38ec100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: Active manager daemon a restarted
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: Activating manager daemon a
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: osdmap e3: 0 total, 0 up, 0 in
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: mgrmap e10: a(active, starting, since 0.00640608s)
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: Manager daemon a is now available
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:53.453 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:53 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:18:54.372 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-07T10:18:54.372 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11,
2026-03-07T10:18:54.372 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-07T10:18:54.372 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-07T10:18:54.373 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 9 is available
2026-03-07T10:18:54.373 INFO:teuthology.orchestra.run.vm03.stdout:Generating a dashboard self-signed certificate...
2026-03-07T10:18:54.620 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:54 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:18:54.620 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:54 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:54.620 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:54 vm03 ceph-mon[50786]: mgrmap e11: a(active, since 1.00883s)
2026-03-07T10:18:54.837 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Self-signed certificate created
2026-03-07T10:18:54.837 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial admin user...
2026-03-07T10:18:55.328 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$LTHg/mkAEGSJMoLRaxvHm.VA7rY37t.2lr9YjivsXXAty4OPRu44e", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1772878735, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-07T10:18:55.328 INFO:teuthology.orchestra.run.vm03.stdout:Fetching dashboard port number...
2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:54] ENGINE Bus STARTING 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:54] ENGINE Serving on http://192.168.123.103:8765 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:54] ENGINE Serving on https://192.168.123.103:7150 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:54] ENGINE Bus STARTED 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: [07/Mar/2026:10:18:54] ENGINE Client ('192.168.123.103', 57218) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='client.14168 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:18:55.613 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:18:55.659 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 8443 2026-03-07T10:18:55.659 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-07T10:18:55.659 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[8443]>. 
2026-03-07T10:18:55.660 INFO:teuthology.orchestra.run.vm03.stdout:Ceph Dashboard is now available at:
2026-03-07T10:18:55.661 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:55.661 INFO:teuthology.orchestra.run.vm03.stdout: URL: https://vm03.local:8443/
2026-03-07T10:18:55.661 INFO:teuthology.orchestra.run.vm03.stdout: User: admin
2026-03-07T10:18:55.661 INFO:teuthology.orchestra.run.vm03.stdout: Password: gz9k7jgnbo
2026-03-07T10:18:55.661 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:55.661 INFO:teuthology.orchestra.run.vm03.stdout:Saving cluster configuration to /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config directory
2026-03-07T10:18:56.049 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:Or, if you are only running a single cluster on this host:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:Please consider enabling telemetry to help improve Ceph:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout: ceph telemetry on
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:For more information see:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:18:56.050 INFO:teuthology.orchestra.run.vm03.stdout:Bootstrap complete.
2026-03-07T10:18:56.077 INFO:tasks.cephadm:Fetching config...
2026-03-07T10:18:56.077 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:18:56.077 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-07T10:18:56.094 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-07T10:18:56.094 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:18:56.094 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-07T10:18:56.157 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-07T10:18:56.157 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:18:56.157 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/keyring of=/dev/stdout
2026-03-07T10:18:56.222 INFO:tasks.cephadm:Fetching pub ssh key...
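The task pulls the cluster conf and keyrings off the bootstrap node with plain `dd` reads, as logged above. A rough hand-run equivalent over ssh, assuming the same paths and fsid as this run and passwordless sudo on the target host:

    # Sketch: fetch the same artifacts the cephadm task copies above.
    ssh vm03 'dd if=/etc/ceph/ceph.conf of=/dev/stdout' > ceph.conf
    ssh vm03 'dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout' > ceph.client.admin.keyring
    ssh vm03 'sudo dd if=/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/keyring of=/dev/stdout' > mon.keyring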
2026-03-07T10:18:56.222 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:18:56.222 DEBUG:teuthology.orchestra.run.vm03:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-07T10:18:56.276 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-03-07T10:18:56.277 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-07T10:18:56.348 INFO:teuthology.orchestra.run.vm03.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:56.358 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-07T10:18:56.390 INFO:teuthology.orchestra.run.vm06.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:56.399 DEBUG:teuthology.orchestra.run.vm08:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-07T10:18:56.432 INFO:teuthology.orchestra.run.vm08.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDxvjnoxuY66369ItQCnNDOoNxQyXeCMYYGnx5KMHRn+X5Fy6wJouWzTMXUd8yf+vBlIbKk/1iRy48ZMtf8Op8rbffOsM8LRuOAcR3sCm4uE4xbfhvjN9BNw4d6YuR1JBGdwm7GJMotaKQSCQB01+bePWGrxFOP+JmX+HLa1UMjCSkyI1pJgU7oYrdtyR5WhqN4zXxnN2ts/xmkI7VCORTftwNITTUXpkBIZbAfQEIRZeKz1XgAwYPhVyRDpB0r6ct4ZG/grSLlCtRVaIMHitAa8eLSmw5CTiWyDhInL8dsTPlPseArkf6nTuPlN5RVvkot/6cYEXG6hHZtfccoSt8hQKzEkCXhjAdnG47G1Gk64bocLzWxLFl0HjDxCArm+JiwxSMVYMxm41gPepu18yLXQRb+jIRGJZ1mzKVfW39qrK3c9ZCUu/ag+cHjazG5yx0dEUfJ6dcckT5FsMq48TuEF0TACCqTrZumIDzhw7t/JFA5e2AY5U8ftofJEcQD3WU= ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:18:56.442 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-03-07T10:18:56.616 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:18:56.643 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:56 vm03 ceph-mon[50786]: from='client.14170 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:56.643 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:56 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3596450759' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-07T10:18:56.643 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:56 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/1161997096' entity='client.admin'
2026-03-07T10:18:56.643 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:56 vm03 ceph-mon[50786]: mgrmap e12: a(active, since 2s)
2026-03-07T10:18:57.023 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-03-07T10:18:57.023 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-03-07T10:18:57.189 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:18:57.639 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm06
2026-03-07T10:18:57.639 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-07T10:18:57.639 DEBUG:teuthology.orchestra.run.vm06:> dd of=/etc/ceph/ceph.conf
2026-03-07T10:18:57.653 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-07T10:18:57.654 DEBUG:teuthology.orchestra.run.vm06:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:18:57.708 INFO:tasks.cephadm:Adding host vm06 to orchestrator...
2026-03-07T10:18:57.708 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch host add vm06
2026-03-07T10:18:57.940 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:18:58.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3744267935' entity='client.admin'
2026-03-07T10:18:58.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:58.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:58.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:18:58.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:58.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:58.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:58.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:58.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:18:58.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:18:58.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:18:59.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:58 vm03 ceph-mon[50786]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:18:59.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:58 vm03 ceph-mon[50786]: Updating vm03:/etc/ceph/ceph.conf
2026-03-07T10:18:59.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:58 vm03 ceph-mon[50786]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf
2026-03-07T10:18:59.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:58 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:59.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:58 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:18:59.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:58 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:00.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:59 vm03 ceph-mon[50786]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:19:00.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:59 vm03 ceph-mon[50786]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring
2026-03-07T10:19:00.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:18:59 vm03 ceph-mon[50786]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:00.729 INFO:teuthology.orchestra.run.vm03.stdout:Added host 'vm06' with addr '192.168.123.106'
2026-03-07T10:19:00.780 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch host ls --format=json
2026-03-07T10:19:00.947 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:19:00.984 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:00 vm03 ceph-mon[50786]: Deploying cephadm binary to vm06
2026-03-07T10:19:00.985 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:00 vm03 ceph-mon[50786]: mgrmap e13: a(active, since 6s)
2026-03-07T10:19:00.985 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:00 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:00.985 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:00 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:00.985 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:00 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:01.263 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:19:01.263 INFO:teuthology.orchestra.run.vm03.stdout:[{"addr": "192.168.123.103", "hostname": "vm03", "labels": [], "status": ""}, {"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}]
2026-03-07T10:19:01.308 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm08
2026-03-07T10:19:01.309 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:19:01.309 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.conf
2026-03-07T10:19:01.323 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:19:01.323 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:19:01.376 INFO:tasks.cephadm:Adding host vm08 to orchestrator...
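Each remote is registered with `ceph orch host add` and the inventory is verified with `ceph orch host ls --format=json`, always through the same cephadm shell wrapper. A condensed sketch of that add-and-verify step; the wrapper string reuses the image, fsid, and paths from this run, and the `jq` filter is just one way to trim the JSON:

    # Sketch: register a host and confirm it appears in the orchestrator inventory.
    shell='sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --'
    $shell ceph orch host add vm08
    $shell ceph orch host ls --format=json | jq -r '.[].hostname'    # vm03 vm06 vm08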
2026-03-07T10:19:01.376 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch host add vm08
2026-03-07T10:19:01.532 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:19:02.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:01 vm03 ceph-mon[50786]: Added host vm06
2026-03-07T10:19:02.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:02.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:03.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:02 vm03 ceph-mon[50786]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:19:03.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:02 vm03 ceph-mon[50786]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:04.047 INFO:teuthology.orchestra.run.vm03.stdout:Added host 'vm08' with addr '192.168.123.108'
2026-03-07T10:19:04.098 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch host ls --format=json
2026-03-07T10:19:04.260 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: Deploying cephadm binary to vm08
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:04.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:03 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:04.574 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:19:04.574 INFO:teuthology.orchestra.run.vm03.stdout:[{"addr": "192.168.123.103", "hostname": "vm03", "labels": [], "status": ""}, {"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}, {"addr": "192.168.123.108", "hostname": "vm08", "labels": [], "status": ""}]
2026-03-07T10:19:04.624 INFO:tasks.cephadm:Setting crush tunables to default
2026-03-07T10:19:04.625 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd crush tunables default
2026-03-07T10:19:04.779 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: Updating vm06:/etc/ceph/ceph.conf
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:05.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:04 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:05.985 INFO:teuthology.orchestra.run.vm03.stderr:adjusted tunables profile to default
2026-03-07T10:19:06.038 INFO:tasks.cephadm:Adding mon.a on vm03
2026-03-07T10:19:06.038 INFO:tasks.cephadm:Adding mon.b on vm06
2026-03-07T10:19:06.038 INFO:tasks.cephadm:Adding mon.c on vm08
2026-03-07T10:19:06.038 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch apply mon '3;vm03:192.168.123.103=a;vm06:192.168.123.106=b;vm08:192.168.123.108=c'
2026-03-07T10:19:06.229 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-07T10:19:06.276 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-07T10:19:06.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:05 vm03 ceph-mon[50786]: Added host vm08
2026-03-07T10:19:06.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:05 vm03 ceph-mon[50786]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:19:06.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:05 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/907823702' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-07T10:19:06.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:05 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:06.636 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mon update...
2026-03-07T10:19:06.717 DEBUG:teuthology.orchestra.run.vm06:mon.b> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.b.service
2026-03-07T10:19:06.718 DEBUG:teuthology.orchestra.run.vm08:mon.c> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.c.service
2026-03-07T10:19:06.721 INFO:tasks.cephadm:Waiting for 3 mons in monmap...
2026-03-07T10:19:06.721 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph mon dump -f json
2026-03-07T10:19:06.975 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-07T10:19:07.025 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-07T10:19:07.386 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:19:07.387 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"d33fdf60-1a0e-11f1-a719-83e365122cb4","modified":"2026-03-07T10:18:07.443746Z","created":"2026-03-07T10:18:07.443746Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-07T10:19:07.387 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/907823702' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: osdmap e4: 0 total, 0 up, 0 in
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:07.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:07.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:07.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:06 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm03:192.168.123.103=a;vm06:192.168.123.106=b;vm08:192.168.123.108=c", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: Saving service mon spec with placement vm03:192.168.123.103=a;vm06:192.168.123.106=b;vm08:192.168.123.108=c;count:3
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: Updating vm08:/etc/ceph/ceph.conf
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='client.? 192.168.123.108:0/3726068185' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-07T10:19:08.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:07 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:08.439 INFO:tasks.cephadm:Waiting for 3 mons in monmap...
2026-03-07T10:19:08.439 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph mon dump -f json
2026-03-07T10:19:08.654 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.c/config
2026-03-07T10:19:09.910 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Manager daemon a is now available
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/882125407' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14100 192.168.123.103:0/333864381' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e3: a(active, since 1.00908s)
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e4: a(active, since 2s)
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/360375697' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3487093848' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3487093848' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/4245075995' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/4245075995' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e5: a(active, since 4s)
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/767823830' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Active manager daemon a restarted
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Activating manager daemon a
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: osdmap e2: 0 total, 0 up, 0 in
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e6: a(active, starting, since 0.285867s)
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Manager daemon a is now available
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Found migration_current of "None". Setting to last migration.
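After applying the explicit mon placement spec, the task repeatedly dumps the monmap until all three monitors register ("Waiting for 3 mons in monmap..." above). A small sketch of such a polling loop, assuming the `$shell` wrapper from the earlier sketch; the three-mon target and the 5-second interval are taken from this run's behavior, not from teuthology's actual implementation:

    # Sketch: poll the monmap until the expected number of mons is present.
    while [ "$($shell ceph mon dump -f json 2>/dev/null | jq '.mons | length')" -lt 3 ]; do
        sleep 5
    done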
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e7: a(active, since 1.30239s)
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:37] ENGINE Bus STARTING
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:37] ENGINE Serving on https://192.168.123.103:7150
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:37] ENGINE Client ('192.168.123.103', 44192) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:37] ENGINE Serving on http://192.168.123.103:8765
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:37] ENGINE Bus STARTED
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Generating ssh key...
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e8: a(active, since 2s)
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "addr": "192.168.123.103", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Deploying cephadm binary to vm03
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Added host vm03
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Saving service mon spec with placement count:5
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Saving service mgr spec with placement count:2
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3081026258' entity='client.admin'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3173881785' entity='client.admin'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2109935597' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14124 192.168.123.103:0/3256805573' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2109935597' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e9: a(active, since 8s)
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2790889367' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Active manager daemon a restarted
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Activating manager daemon a
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: osdmap e3: 0 total, 0 up, 0 in
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e10: a(active, starting, since 0.00640608s)
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Manager daemon a is now available
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e11: a(active, since 1.00883s)
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:54] ENGINE Bus STARTING
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:54] ENGINE Serving on http://192.168.123.103:8765
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:54] ENGINE Serving on https://192.168.123.103:7150
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:54] ENGINE Bus STARTED
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: [07/Mar/2026:10:18:54] ENGINE Client ('192.168.123.103', 57218) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14168 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14170 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3596450759' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/1161997096' entity='client.admin'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e12: a(active, since 2s)
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3744267935' entity='client.admin'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:10.220 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm03:/etc/ceph/ceph.conf
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Deploying cephadm binary to vm06
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: mgrmap e13: a(active, since 6s)
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Added host vm06
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Deploying cephadm binary to vm08
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm06:/etc/ceph/ceph.conf 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Added host vm08 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/907823702' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/907823702' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm03:192.168.123.103=a;vm06:192.168.123.106=b;vm08:192.168.123.108=c", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Saving service mon spec with placement vm03:192.168.123.103=a;vm06:192.168.123.106=b;vm08:192.168.123.108=c;count:3 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm08:/etc/ceph/ceph.conf 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='client.? 
192.168.123.108:0/3726068185' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:19:10.221 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:09 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: Deploying daemon mon.b on vm06 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: mon.a calling monitor election 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: mon.c calling monitor election 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:14.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: monmap epoch 2 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:14.163 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: last_changed 2026-03-07T10:19:08.669425+0000 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: min_mon_release 19 (squid) 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: election_strategy: 1 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: fsmap 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: mgrmap e13: a(active, since 20s) 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: overall HEALTH_OK 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:14.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:13 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:15.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:19:14 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:19:14.667+0000 7f2d84905640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-07T10:19:19.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:18 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: mon.a calling monitor election 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: mon.c calling monitor election 2026-03-07T10:19:19.412 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: monmap epoch 3 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: last_changed 2026-03-07T10:19:13.916506+0000 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: min_mon_release 19 (squid) 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: election_strategy: 1 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:19.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: fsmap 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: mgrmap e13: a(active, since 25s) 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: Health check failed: 1/3 mons down, quorum a,c (MON_DOWN) 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: Health detail: HEALTH_WARN 1/3 mons down, quorum a,c 2026-03-07T10:19:19.413 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: [WRN] MON_DOWN: 1/3 mons down, quorum a,c 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: mon.b (rank 2) addr [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] is down (out of quorum) 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:19.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:18 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:19.439 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-07T10:19:19.439 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":3,"fsid":"d33fdf60-1a0e-11f1-a719-83e365122cb4","modified":"2026-03-07T10:19:13.916506Z","created":"2026-03-07T10:18:07.443746Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-07T10:19:19.440 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 3 2026-03-07T10:19:19.505 INFO:tasks.cephadm:Generating final ceph.conf file... 
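
The monmap dumped above reports "quorum":[0,1] while mon.b (rank 2) is still being deployed, which matches the MON_DOWN warning in the monitor journal. To pull the in-quorum monitor names out of such a dump, something like the following works (a minimal sketch, assuming jq is installed on the host; this is not a command the job itself ran):

    ceph mon dump -f json | jq -r '
      .quorum as $q                                   # quorum ranks, here [0,1]
      | .mons[]                                       # each monitor record
      | select(.rank as $r | $q | index($r) != null)  # keep only in-quorum ranks
      | .name'                                        # prints: a, c
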
2026-03-07T10:19:19.505 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph config generate-minimal-conf
2026-03-07T10:19:19.676 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:19:19.984 INFO:teuthology.orchestra.run.vm03.stdout:# minimal ceph.conf for d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:19:19.984 INFO:teuthology.orchestra.run.vm03.stdout:[global]
2026-03-07T10:19:19.984 INFO:teuthology.orchestra.run.vm03.stdout: fsid = d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:19:19.984 INFO:teuthology.orchestra.run.vm03.stdout: mon_host = [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0]
2026-03-07T10:19:20.047 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
2026-03-07T10:19:20.047 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:19:20.047 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf
2026-03-07T10:19:20.075 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:19:20.075 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:19:20.138 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-07T10:19:20.138 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.conf
2026-03-07T10:19:20.162 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-07T10:19:20.162 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:19:20.224 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-07T10:19:20.224 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.conf
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: Updating vm03:/etc/ceph/ceph.conf
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: Updating vm06:/etc/ceph/ceph.conf
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: Updating vm08:/etc/ceph/ceph.conf
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:20.249
INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='client.? 192.168.123.108:0/1072451818' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
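
The mon.c journal above shows the trio of monitor commands cephadm's mgr dispatches while preparing each new monitor: fetch the mon. keyring, look up public_network, and regenerate the minimal conf. The same state can be inspected by hand with the standard CLI (a sketch for reference; the job only issues these through the mgr):

    ceph auth get mon.                    # keyring handed to a newly deployed monitor
    ceph config get mon public_network   # network constraint used to place mon daemons
    ceph config generate-minimal-conf     # the conf cephadm writes under /etc/ceph
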
2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:20.249 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:19 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3956324876' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.252 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-07T10:19:20.252 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:19:20.318 INFO:tasks.cephadm:Adding mgr.a on vm03 2026-03-07T10:19:20.318 INFO:tasks.cephadm:Adding mgr.b on vm06 2026-03-07T10:19:20.318 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch apply mgr '2;vm03=a;vm06=b' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: Updating vm03:/etc/ceph/ceph.conf 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: Updating vm06:/etc/ceph/ceph.conf 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: Updating vm08:/etc/ceph/ceph.conf 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:20.412 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='client.? 192.168.123.108:0/1072451818' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:20.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:19 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3956324876' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:20.504 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.c/config 2026-03-07T10:19:20.807 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mgr update... 
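
The "Scheduled mgr update..." above is the response to the ceph orch apply mgr '2;vm03=a;vm06=b' invocation a few entries earlier. That quoted placement shorthand is semicolon-separated: a daemon count followed by host=daemon-id pairs (optionally host:addr=daemon-id, as in the mon spec), and the journal later confirms the parse ("Saving service mgr spec with placement vm03=a;vm06=b;count:2"). Both forms as used in this run:

    # two mgrs: mgr.a pinned to vm03, mgr.b pinned to vm06
    ceph orch apply mgr '2;vm03=a;vm06=b'
    # three mons, each pinned to a host and address
    ceph orch apply mon '3;vm03:192.168.123.103=a;vm06:192.168.123.106=b;vm08:192.168.123.108=c'
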
2026-03-07T10:19:20.851 DEBUG:teuthology.orchestra.run.vm06:mgr.b> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.b.service
2026-03-07T10:19:20.852 INFO:tasks.cephadm:Deploying OSDs...
2026-03-07T10:19:20.853 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-07T10:19:20.853 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout
2026-03-07T10:19:20.866 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:19:20.866 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d?
2026-03-07T10:19:20.922 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda
2026-03-07T10:19:20.922 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb
2026-03-07T10:19:20.922 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc
2026-03-07T10:19:20.922 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd
2026-03-07T10:19:20.922 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde
2026-03-07T10:19:20.922 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-07T10:19:20.922 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-07T10:19:20.922 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-07 10:18:58.716787883 +0000
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-07 10:17:19.226847868 +0000
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-07 10:17:19.226847868 +0000
2026-03-07T10:19:20.979 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-07 10:13:44.318000000 +0000
2026-03-07T10:19:20.979 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-07T10:19:21.041 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in
2026-03-07T10:19:21.041 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out
2026-03-07T10:19:21.042 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.00016585 s, 3.1 MB/s
2026-03-07T10:19:21.042 DEBUG:teuthology.orchestra.run.vm03:> !
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-07T10:19:21.098 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc 2026-03-07T10:19:21.126 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: Deploying daemon mon.b on vm06 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.a calling monitor election 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.c calling monitor election 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: monmap epoch 2 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: last_changed 2026-03-07T10:19:08.669425+0000 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: 
created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: min_mon_release 19 (squid) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: election_strategy: 1 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: fsmap 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mgrmap e13: a(active, since 20s) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: overall HEALTH_OK 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.a calling monitor election 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.c calling monitor election 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 
192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: monmap epoch 3 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: last_changed 2026-03-07T10:19:13.916506+0000 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: min_mon_release 19 (squid) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: election_strategy: 1 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: fsmap 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mgrmap e13: a(active, since 25s) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: Health check failed: 1/3 mons down, quorum a,c (MON_DOWN) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: Health detail: HEALTH_WARN 1/3 mons down, quorum a,c 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: [WRN] MON_DOWN: 1/3 mons down, quorum a,c 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: mon.b (rank 2) addr [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] is down (out of quorum) 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.127 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 
10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: Updating vm03:/etc/ceph/ceph.conf 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: Updating vm06:/etc/ceph/ceph.conf 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: Updating vm08:/etc/ceph/ceph.conf 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:21.128 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='client.? 192.168.123.108:0/1072451818' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:21.128 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:20 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/3956324876' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:21.153 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc 2026-03-07T10:19:21.153 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:21.153 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-07T10:19:21.153 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:21.153 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:21.153 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-07 10:18:58.744787904 +0000 2026-03-07T10:19:21.154 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-07 10:17:19.271847905 +0000 2026-03-07T10:19:21.154 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-07 10:17:19.271847905 +0000 2026-03-07T10:19:21.154 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-07 10:13:44.323000000 +0000 2026-03-07T10:19:21.154 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-07T10:19:21.215 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-07T10:19:21.215 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-07T10:19:21.216 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000125043 s, 4.1 MB/s 2026-03-07T10:19:21.216 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-07T10:19:21.272 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd 2026-03-07T10:19:21.328 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-07 10:18:58.787787935 +0000 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-07 10:17:19.228847869 +0000 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-07 10:17:19.228847869 +0000 2026-03-07T10:19:21.329 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-07 10:13:44.330000000 +0000 2026-03-07T10:19:21.329 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-07T10:19:21.391 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-07T10:19:21.391 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-07T10:19:21.391 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000199944 s, 2.6 MB/s 2026-03-07T10:19:21.392 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-07T10:19:21.448 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-07 10:18:58.821787961 +0000 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-07 10:17:19.232847873 +0000 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-07 10:17:19.232847873 +0000 2026-03-07T10:19:21.513 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-07 10:13:44.337000000 +0000 2026-03-07T10:19:21.513 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-07T10:19:21.577 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-07T10:19:21.577 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-07T10:19:21.577 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000119555 s, 4.3 MB/s 2026-03-07T10:19:21.578 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-07T10:19:21.633 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-07T10:19:21.633 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout 2026-03-07T10:19:21.708 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:19:21.708 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d? 
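
The scratch-device discovery that begins at "Deploying OSDs..." runs once per host: a read of /scratch_devs is attempted first (it exits 1 here, so there is no pre-seeded device list), then /dev/[sv]d? is globbed, the root device is dropped, and each remaining device must stat as a block node, be readable, and not be mounted. A condensed shell sketch of that probe (structure and variable names are illustrative, not teuthology's actual code):

    root_dev=/dev/vda                      # root disk, excluded from the list above
    for dev in /dev/[sv]d?; do
      [ "$dev" = "$root_dev" ] && continue                             # skip root
      stat "$dev" >/dev/null 2>&1 || continue                          # must exist
      sudo dd if="$dev" of=/dev/null count=1 2>/dev/null || continue   # must be readable
      mount | grep -v devtmpfs | grep -q "$dev" && continue            # must be unmounted
      echo "usable scratch device: $dev"
    done
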
2026-03-07T10:19:21.717 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:21.693+0000 7f4797f74100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-07T10:19:21.766 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda 2026-03-07T10:19:21.766 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb 2026-03-07T10:19:21.766 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc 2026-03-07T10:19:21.766 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd 2026-03-07T10:19:21.766 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde 2026-03-07T10:19:21.766 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-07T10:19:21.766 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-07T10:19:21.766 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdb 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-07 10:19:03.234478219 +0000 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-07 10:17:19.244497265 +0000 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-07 10:17:19.244497265 +0000 2026-03-07T10:19:21.835 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-07 10:14:14.190000000 +0000 2026-03-07T10:19:21.835 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-07T10:19:21.942 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-07T10:19:21.942 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-07T10:19:21.942 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000140803 s, 3.6 MB/s 2026-03-07T10:19:21.944 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-07T10:19:21.976 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdc 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,20 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-07 10:19:03.258478229 +0000 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-07 10:17:19.271497290 +0000 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-07 10:17:19.271497290 +0000 2026-03-07T10:19:22.048 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-07 10:14:14.195000000 +0000 2026-03-07T10:19:22.048 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-07T10:19:22.117 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: mon.b calling monitor election 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: from='client.14208 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm03=a;vm06=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: Saving service mgr spec with placement vm03=a;vm06=b;count:2 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: Deploying daemon mgr.b on vm06 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: mon.b calling monitor election 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: mon.a calling monitor election 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: mon.c calling monitor election 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: monmap epoch 3 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: last_changed 2026-03-07T10:19:13.916506+0000 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: min_mon_release 19 (squid) 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: election_strategy: 1 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: 1: 
[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: fsmap 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: mgrmap e13: a(active, since 27s) 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: Health check cleared: MON_DOWN (was: 1/3 mons down, quorum a,c) 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: Cluster is now healthy 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: overall HEALTH_OK 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:22.118 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:21 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:21.832+0000 7f4797f74100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:19:22.120 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-07T10:19:22.120 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-07T10:19:22.120 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000121988 s, 4.2 MB/s 2026-03-07T10:19:22.121 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: mon.b calling monitor election 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: from='client.14208 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm03=a;vm06=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: Saving service mgr spec with placement vm03=a;vm06=b;count:2 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: Deploying daemon mgr.b on vm06 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: mon.b calling monitor election 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: mon.a calling monitor election 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: mon.c calling monitor election 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: monmap epoch 3 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: last_changed 2026-03-07T10:19:13.916506+0000 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: min_mon_release 19 (squid) 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: election_strategy: 1 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: fsmap 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: mgrmap e13: a(active, since 27s) 2026-03-07T10:19:22.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: Health check cleared: MON_DOWN (was: 1/3 mons down, quorum a,c) 2026-03-07T10:19:22.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: Cluster is now healthy 2026-03-07T10:19:22.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: overall HEALTH_OK 2026-03-07T10:19:22.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.163 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:21 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:22.181 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdd 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-07 10:19:03.282478239 +0000 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-07 10:17:19.232497254 +0000 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-07 10:17:19.232497254 +0000 2026-03-07T10:19:22.245 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-07 10:14:14.215000000 +0000 2026-03-07T10:19:22.245 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: mon.b calling monitor election 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: from='client.14208 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm03=a;vm06=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: Saving service mgr spec with placement vm03=a;vm06=b;count:2 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: Deploying daemon mgr.b on vm06 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: mon.b calling monitor election 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: mon.a calling monitor election 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: mon.c calling monitor election 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: monmap epoch 3 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: last_changed 2026-03-07T10:19:13.916506+0000 2026-03-07T10:19:22.291 
INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: created 2026-03-07T10:18:07.443746+0000 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: min_mon_release 19 (squid) 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: election_strategy: 1 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.c 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: 2: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.b 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: fsmap 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: mgrmap e13: a(active, since 27s) 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: Health check cleared: MON_DOWN (was: 1/3 mons down, quorum a,c) 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: Cluster is now healthy 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: overall HEALTH_OK 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.292 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.292 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:21 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:22.313 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-07T10:19:22.313 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-07T10:19:22.313 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000163997 s, 3.1 MB/s 2026-03-07T10:19:22.314 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-07T10:19:22.374 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde 2026-03-07T10:19:22.430 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vde 2026-03-07T10:19:22.430 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:22.430 INFO:teuthology.orchestra.run.vm06.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-07T10:19:22.430 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:22.430 INFO:teuthology.orchestra.run.vm06.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:22.431 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-07 10:19:03.307478249 +0000 2026-03-07T10:19:22.431 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-07 10:17:19.240497262 +0000 2026-03-07T10:19:22.431 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-07 10:17:19.240497262 +0000 2026-03-07T10:19:22.431 INFO:teuthology.orchestra.run.vm06.stdout: Birth: 2026-03-07 10:14:14.219000000 +0000 2026-03-07T10:19:22.431 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-07T10:19:22.492 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-03-07T10:19:22.492 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-03-07T10:19:22.492 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000114013 s, 4.5 MB/s 2026-03-07T10:19:22.493 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-07T10:19:22.552 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-07T10:19:22.552 DEBUG:teuthology.orchestra.run.vm08:> dd if=/scratch_devs of=/dev/stdout 2026-03-07T10:19:22.566 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:19:22.566 DEBUG:teuthology.orchestra.run.vm08:> ls /dev/[sv]d? 
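On each host the harness first tries to read an explicit /scratch_devs manifest; the `dd if=/scratch_devs of=/dev/stdout` above exits 1 because that file is absent on these VPS nodes, so teuthology.misc falls back to globbing /dev/[sv]d? and then drops the root device (the `Removing root device: /dev/vda from device list` warning). A sketch of that fallback, run locally and with the root device passed in rather than detected:

    import glob
    import subprocess

    def discover_scratch_devs(root_dev: str = "/dev/vda") -> list[str]:
        # Preferred source: a /scratch_devs manifest listing the devices.
        manifest = subprocess.run(["dd", "if=/scratch_devs", "of=/dev/stdout"],
                                  capture_output=True, text=True)
        if manifest.returncode == 0 and manifest.stdout.split():
            return manifest.stdout.split()
        # Fallback: enumerate SCSI/virtio disks by name, as `ls /dev/[sv]d?` does.
        devs = sorted(glob.glob("/dev/[sv]d?"))
        # Never offer the root disk as a scratch device.
        if root_dev in devs:
            print(f"Removing root device: {root_dev} from device list")
            devs.remove(root_dev)
        return devs

    print(discover_scratch_devs())

The surviving devices (/dev/vdb through /dev/vde here) are what the per-device stat/dd/mount check is then applied to.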
2026-03-07T10:19:22.621 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vda 2026-03-07T10:19:22.621 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdb 2026-03-07T10:19:22.621 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdc 2026-03-07T10:19:22.621 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdd 2026-03-07T10:19:22.621 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vde 2026-03-07T10:19:22.621 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-07T10:19:22.621 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-07T10:19:22.621 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdb 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdb 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:19:06.611434446 +0000 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:17:19.366298884 +0000 2026-03-07T10:19:22.676 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:17:19.366298884 +0000 2026-03-07T10:19:22.677 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:13:18.223000000 +0000 2026-03-07T10:19:22.677 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-07T10:19:22.739 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-07T10:19:22.739 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-07T10:19:22.739 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000185307 s, 2.8 MB/s 2026-03-07T10:19:22.740 DEBUG:teuthology.orchestra.run.vm08:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-07T10:19:22.795 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdc 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdc 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 224 Links: 1 Device type: fc,20 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:19:06.644434481 +0000 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:17:19.371298889 +0000 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:17:19.371298889 +0000 2026-03-07T10:19:22.850 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:13:18.230000000 +0000 2026-03-07T10:19:22.850 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 
192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:22.911 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:22 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:22.912 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-07T10:19:22.912 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-07T10:19:22.912 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000121477 s, 4.2 MB/s 2026-03-07T10:19:22.913 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-07T10:19:22.969 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdd 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdd 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 225 Links: 1 Device type: fc,30 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:19:06.686434525 +0000 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:17:19.386298902 +0000 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:17:19.386298902 +0000 2026-03-07T10:19:23.024 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:13:18.235000000 +0000 2026-03-07T10:19:23.024 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-07T10:19:23.084 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-07T10:19:23.085 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-07T10:19:23.085 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000174576 s, 2.9 MB/s 2026-03-07T10:19:23.085 DEBUG:teuthology.orchestra.run.vm08:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-07T10:19:23.141 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vde 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:23.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:22 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.196 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vde 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:19:23.197 
INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 243 Links: 1 Device type: fc,40 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-07 10:19:06.724434564 +0000 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-07 10:17:19.363298881 +0000 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-07 10:17:19.363298881 +0000 2026-03-07T10:19:23.197 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-07 10:13:18.243000000 +0000 2026-03-07T10:19:23.197 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: 
from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:23.218 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:22 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:22.933+0000 7f4797f74100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:19:23.259 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-07T10:19:23.259 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-07T10:19:23.259 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000173285 s, 3.0 MB/s 2026-03-07T10:19:23.259 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-07T10:19:23.315 INFO:tasks.cephadm:Deploying osd.0 on vm03 with /dev/vde... 2026-03-07T10:19:23.315 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- lvm zap /dev/vde 2026-03-07T10:19:23.472 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:19:23.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:23 vm03 ceph-mon[50786]: Reconfiguring mgr.a (unknown last config time)... 2026-03-07T10:19:23.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:23 vm03 ceph-mon[50786]: Reconfiguring daemon mgr.a on vm03 2026-03-07T10:19:24.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:23 vm06 ceph-mon[56197]: Reconfiguring mgr.a (unknown last config time)... 2026-03-07T10:19:24.092 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:23 vm06 ceph-mon[56197]: Reconfiguring daemon mgr.a on vm03 2026-03-07T10:19:24.092 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:23 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:23.740+0000 7f4797f74100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:19:24.092 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:23 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:23.858+0000 7f4797f74100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:19:24.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:23 vm08 ceph-mon[55906]: Reconfiguring mgr.a (unknown last config time)... 
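Deploying each OSD is a two-step cephadm invocation, both visible in this log: `cephadm ... ceph-volume -- lvm zap <dev>` (above) wipes any leftover LVM and partition state, then `cephadm ... shell -- ceph orch daemon add osd <host>:<dev>` (issued just below) hands the clean device to the orchestrator. A sketch that assembles the same command lines, with run_remote() as a hypothetical stand-in for teuthology's SSH execution:

    import shlex
    import subprocess

    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5"
    FSID = "d33fdf60-1a0e-11f1-a719-83e365122cb4"
    CONF = "/etc/ceph/ceph.conf"
    KEYRING = "/etc/ceph/ceph.client.admin.keyring"

    def run_remote(cmd: list[str]) -> None:
        # Stand-in: teuthology executes this over SSH on the target host.
        print(">", shlex.join(cmd))
        subprocess.run(cmd, check=True)

    def deploy_osd(host: str, dev: str) -> None:
        base = ["sudo", CEPHADM, "--image", IMAGE]
        ids = ["-c", CONF, "-k", KEYRING, "--fsid", FSID]
        # Step 1: destroy any previous LVM/partition state on the device.
        run_remote(base + ["ceph-volume"] + ids + ["--", "lvm", "zap", dev])
        # Step 2: ask the orchestrator to create an OSD on the clean device.
        run_remote(base + ["shell"] + ids
                   + ["--", "ceph", "orch", "daemon", "add", "osd", f"{host}:{dev}"])

    deploy_osd("vm03", "/dev/vde")

The second step is asynchronous on the cluster side: the mon log entries that follow (`osd new`, `osdmap e5: 1 total, 0 up, 1 in`, `Deploying daemon osd.0 on vm03`) show the orchestrator picking the request up rather than the shell command blocking until the OSD is up.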
2026-03-07T10:19:24.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:23 vm08 ceph-mon[55906]: Reconfiguring daemon mgr.a on vm03 2026-03-07T10:19:24.468 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:24 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:24.090+0000 7f4797f74100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:19:24.487 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:19:24.504 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch daemon add osd vm03:/dev/vde 2026-03-07T10:19:24.662 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:19:24.949 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:24 vm03 ceph-mon[50786]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:24.949 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:24 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:25.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:24 vm06 ceph-mon[56197]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:25.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:24 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:25.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:24 vm08 ceph-mon[55906]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:25.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:24 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:26.006 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:26.006 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:19:26.006 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:19:26.006 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:26.007 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/875533697' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7"}]: dispatch 2026-03-07T10:19:26.007 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/875533697' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7"}]': finished 2026-03-07T10:19:26.007 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: osdmap e5: 1 total, 0 up, 1 in 2026-03-07T10:19:26.007 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:25 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/875533697' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7"}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/875533697' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7"}]': finished 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: osdmap e5: 1 total, 0 up, 1 in 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:19:26.140 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:25 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:25.816+0000 7f4797f74100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/875533697' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7"}]: dispatch 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/875533697' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7"}]': finished 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: osdmap e5: 1 total, 0 up, 1 in 2026-03-07T10:19:26.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:25 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:19:26.467 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:26.138+0000 7f4797f74100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:19:26.468 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:26.268+0000 7f4797f74100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:19:26.468 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:26.381+0000 7f4797f74100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-07T10:19:26.937 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:26.511+0000 7f4797f74100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:19:26.937 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:26.624+0000 7f4797f74100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:19:27.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:26 vm03 ceph-mon[50786]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:27.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:26 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3843146484' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:19:27.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-mon[56197]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:27.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:26 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3843146484' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:19:27.218 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:27 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:27.108+0000 7f4797f74100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:19:27.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:26 vm08 ceph-mon[55906]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:27.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:26 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/3843146484' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:19:27.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:27 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:27.254+0000 7f4797f74100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:19:28.217 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:27 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:27.938+0000 7f4797f74100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:19:29.082 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:28 vm03 ceph-mon[50786]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:29.194 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:28 vm06 ceph-mon[56197]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:29.194 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:28 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:28.929+0000 7f4797f74100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:19:29.194 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:29 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:29.048+0000 7f4797f74100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:19:29.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:28 vm08 ceph-mon[55906]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:29.448 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:29 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:29.191+0000 7f4797f74100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:19:29.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:29 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:29.445+0000 7f4797f74100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:19:29.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:29 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:29.569+0000 7f4797f74100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:19:30.217 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:29 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:29.885+0000 7f4797f74100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:19:30.575 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:30.221+0000 7f4797f74100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:19:30.968 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:30.572+0000 7f4797f74100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:19:30.968 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:19:30.690+0000 7f4797f74100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:19:30.974 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:19:30.974 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 
cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-07T10:19:30.974 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:30.974 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: Standby manager daemon b started
2026-03-07T10:19:30.974 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch
2026-03-07T10:19:30.974 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-07T10:19:30.975 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch
2026-03-07T10:19:30.975 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:30 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: Standby manager daemon b started
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch
2026-03-07T10:19:31.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:30 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: Standby manager daemon b started
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch
2026-03-07T10:19:31.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:30 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3946802815' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-07T10:19:32.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:31 vm08 ceph-mon[55906]: Deploying daemon osd.0 on vm03
2026-03-07T10:19:32.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:31 vm08 ceph-mon[55906]: mgrmap e14: a(active, since 37s), standbys: b
2026-03-07T10:19:32.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:31 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch
2026-03-07T10:19:32.368 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:31 vm03 ceph-mon[50786]: Deploying daemon osd.0 on vm03
2026-03-07T10:19:32.368 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:31 vm03 ceph-mon[50786]: mgrmap e14: a(active, since 37s), standbys: b
2026-03-07T10:19:32.368 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:31 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch
2026-03-07T10:19:32.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:31 vm06 ceph-mon[56197]: Deploying daemon osd.0 on vm03
2026-03-07T10:19:32.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:31 vm06 ceph-mon[56197]: mgrmap e14: a(active, since 37s), standbys: b
2026-03-07T10:19:32.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:31 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch
2026-03-07T10:19:33.097 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:32 vm03 ceph-mon[50786]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:33.098 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:32 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:33.098 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:32 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:33.098 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:32 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:33.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:32 vm08 ceph-mon[55906]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:33.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:32 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:33.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:32 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:33.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:32 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:33.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:32 vm06 ceph-mon[56197]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:33.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:32 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:33.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:32 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:33.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:32 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:33.911 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 0 on host 'vm03'
2026-03-07T10:19:33.976 DEBUG:teuthology.orchestra.run.vm03:osd.0> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.0.service
2026-03-07T10:19:33.978 INFO:tasks.cephadm:Deploying osd.1 on vm06 with /dev/vde...
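The deploy loop above now repeats for osd.1: cephadm first wipes the target device with ceph-volume, then hands the clean device to the orchestrator (both commands appear verbatim in the DEBUG lines that follow). A minimal sketch of the same two steps run by hand, using the image, fsid and device from this run:

  # wipe any previous LVM/partition metadata from the device
  sudo cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 \
      ceph-volume --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- lvm zap /dev/vde
  # ask the orchestrator to create and start an OSD on the clean device
  sudo cephadm shell -- ceph orch daemon add osd vm06:/dev/vde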
2026-03-07T10:19:33.978 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- lvm zap /dev/vde
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.073 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:34 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.153 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.b/config
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.297 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:34 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.362 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:19:34 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0[62416]: 2026-03-07T10:19:34.191+0000 7fdc8e477740 -1 osd.0 0 log_to_monitors true
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:34.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:34 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:35.199 INFO:teuthology.orchestra.run.vm06.stdout:
2026-03-07T10:19:35.214 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch daemon add osd vm06:/dev/vde
2026-03-07T10:19:35.370 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.b/config
2026-03-07T10:19:35.393 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:35 vm06 ceph-mon[56197]: from='osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:19:35.393 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:35 vm06 ceph-mon[56197]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:19:35.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:35 vm03 ceph-mon[50786]: from='osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:19:35.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:35 vm03 ceph-mon[50786]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:19:35.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:35 vm08 ceph-mon[55906]: from='osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:19:35.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:35 vm08 ceph-mon[55906]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: osdmap e6: 1 total, 0 up, 1 in
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: Detected new or changed devices on vm03
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: Adjusting osd_memory_target on vm03 to 257.0M
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: Unable to set osd_memory_target on vm03 to 269533593: error parsing value: Value '269533593' is below minimum 939524096
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
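The "Unable to set osd_memory_target" warnings above are cephadm's memory autotuner at work: it divides each host's RAM across its daemons, and on these small VPS nodes the computed share (269533593 bytes, the "257.0M" in the log) falls below the option's hard minimum of 939524096 bytes (896 MiB), so the mon rejects the value and the OSD keeps its default. A sketch of how this could be inspected or overridden on such a cluster (the value shown is the floor from the log, not a sizing recommendation):

  # show what osd.0 is actually running with (the default is 4 GiB)
  ceph config get osd.0 osd_memory_target
  # opt out of autotuning and pin an explicit target at or above the 896 MiB floor
  ceph config set osd osd_memory_target_autotune false
  ceph config set osd osd_memory_target 939524096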
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='client.24119 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:19:36.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:36.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-07T10:19:36.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: osdmap e7: 1 total, 0 up, 1 in
2026-03-07T10:19:36.219 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:36 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:19:36 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0[62416]: 2026-03-07T10:19:36.083+0000 7fdc8a3f8640 -1 osd.0 0 waiting for initial osdmap
2026-03-07T10:19:36.412 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:19:36 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0[62416]: 2026-03-07T10:19:36.091+0000 7fdc86222640 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: osdmap e6: 1 total, 0 up, 1 in
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: Detected new or changed devices on vm03
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: Adjusting osd_memory_target on vm03 to 257.0M
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: Unable to set osd_memory_target on vm03 to 269533593: error parsing value: Value '269533593' is below minimum 939524096
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='client.24119 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:36.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-07T10:19:36.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: osdmap e7: 1 total, 0 up, 1 in
2026-03-07T10:19:36.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:36 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: osdmap e6: 1 total, 0 up, 1 in
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: Detected new or changed devices on vm03
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: Adjusting osd_memory_target on vm03 to 257.0M
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: Unable to set osd_memory_target on vm03 to 269533593: error parsing value: Value '269533593' is below minimum 939524096
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='client.24119 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm06:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: osdmap e7: 1 total, 0 up, 1 in
2026-03-07T10:19:36.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:36 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: from='client.? 192.168.123.106:0/977516502' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]: dispatch
2026-03-07T10:19:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]: dispatch
2026-03-07T10:19:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]': finished
2026-03-07T10:19:37.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104] boot
2026-03-07T10:19:37.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: osdmap e8: 2 total, 1 up, 2 in
2026-03-07T10:19:37.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:37 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: from='client.? 192.168.123.106:0/977516502' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]: dispatch
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]: dispatch
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]': finished
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104] boot
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: osdmap e8: 2 total, 1 up, 2 in
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:37 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: from='client.? 192.168.123.106:0/977516502' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]: dispatch
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]: dispatch
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8654b7ef-7e65-47e2-8422-0002d4c5dc1e"}]': finished
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: osd.0 [v2:192.168.123.103:6802/3001304104,v1:192.168.123.103:6803/3001304104] boot
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: osdmap e8: 2 total, 1 up, 2 in
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-07T10:19:37.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:37 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:38.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:38 vm03 ceph-mon[50786]: purged_snaps scrub starts
2026-03-07T10:19:38.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:38 vm03 ceph-mon[50786]: purged_snaps scrub ok
2026-03-07T10:19:38.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:38 vm03 ceph-mon[50786]: from='client.? 192.168.123.106:0/2933410524' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:19:38.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:38 vm03 ceph-mon[50786]: pgmap v20: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:38.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:38 vm06 ceph-mon[56197]: purged_snaps scrub starts
2026-03-07T10:19:38.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:38 vm06 ceph-mon[56197]: purged_snaps scrub ok
2026-03-07T10:19:38.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:38 vm06 ceph-mon[56197]: from='client.? 192.168.123.106:0/2933410524' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:19:38.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:38 vm06 ceph-mon[56197]: pgmap v20: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:38.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:38 vm08 ceph-mon[55906]: purged_snaps scrub starts
2026-03-07T10:19:38.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:38 vm08 ceph-mon[55906]: purged_snaps scrub ok
2026-03-07T10:19:38.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:38 vm08 ceph-mon[55906]: from='client.? 192.168.123.106:0/2933410524' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:19:38.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:38 vm08 ceph-mon[55906]: pgmap v20: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:39.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:39 vm03 ceph-mon[50786]: osdmap e9: 2 total, 1 up, 2 in
2026-03-07T10:19:39.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:39 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:39.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:39 vm06 ceph-mon[56197]: osdmap e9: 2 total, 1 up, 2 in
2026-03-07T10:19:39.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:39 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:39.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:39 vm08 ceph-mon[55906]: osdmap e9: 2 total, 1 up, 2 in
2026-03-07T10:19:39.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:39 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:40.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:40 vm03 ceph-mon[50786]: pgmap v22: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:40.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:40 vm06 ceph-mon[56197]: pgmap v22: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:40.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:40 vm08 ceph-mon[55906]: pgmap v22: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:41.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:41 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:19:41.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:41 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:41.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:41 vm06 ceph-mon[56197]: Deploying daemon osd.1 on vm06
2026-03-07T10:19:41.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:41 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:19:41.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:41 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:41.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:41 vm03 ceph-mon[50786]: Deploying daemon osd.1 on vm06
2026-03-07T10:19:41.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:41 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-07T10:19:41.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:41 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:41.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:41 vm08 ceph-mon[55906]: Deploying daemon osd.1 on vm06
2026-03-07T10:19:42.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:42 vm03 ceph-mon[50786]: pgmap v23: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:42.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:42 vm06 ceph-mon[56197]: pgmap v23: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:42.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:42 vm08 ceph-mon[55906]: pgmap v23: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:43.345 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:43 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:43.346 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:43 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:43.346 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:43 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:43.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:43 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:43.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:43 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:43.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:43 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:43.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:43 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:43.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:43 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:43.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:43 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:44.743 INFO:teuthology.orchestra.run.vm06.stdout:Created osd(s) 1 on host 'vm06'
2026-03-07T10:19:44.803 DEBUG:teuthology.orchestra.run.vm06:osd.1> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.1.service
2026-03-07T10:19:44.804 INFO:tasks.cephadm:Deploying osd.2 on vm08 with /dev/vde...
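Each OSD registers itself in the CRUSH map as it boots, as the mon logs above show: it sets its device class to hdd, then places itself under its host bucket via "osd crush create-or-move" with weight 0.0195. CRUSH weights are expressed in TiB, so 0.0195 corresponds to roughly 20 GiB, matching the virtual disks the pgmap reports ("20 GiB / 20 GiB avail"). Two standard commands that could be used to sanity-check the resulting layout:

  # expect one class-hdd OSD per host under root=default, weight ~0.0195 each
  ceph osd tree
  # same tree plus per-OSD size, utilization and PG counts
  ceph osd df tree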
2026-03-07T10:19:44.804 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- lvm zap /dev/vde
2026-03-07T10:19:44.955 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: pgmap v24: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:44.956 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:44 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:44.963 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.c/config
2026-03-07T10:19:45.117 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: pgmap v24: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.118 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:44 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: pgmap v24: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:45.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:44 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.035 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-07T10:19:46.050 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph orch daemon add osd vm08:/dev/vde
2026-03-07T10:19:46.204 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.c/config
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: osdmap e10: 2 total, 1 up, 2 in
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: pgmap v26: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: Detected new or changed devices on vm06
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: Adjusting osd_memory_target on vm06 to 257.0M
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: Unable to set osd_memory_target on vm06 to 269536460: error parsing value: Value '269536460' is below minimum 939524096
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:46.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:46.542 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:46 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: osdmap e10: 2 total, 1 up, 2 in
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: pgmap v26: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: Detected new or changed devices on vm06
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: Adjusting osd_memory_target on vm06 to 257.0M
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: Unable to set osd_memory_target on vm06 to 269536460: error parsing value: Value '269536460' is below minimum 939524096
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:46.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:46 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: osdmap e10: 2 total, 1 up, 2 in
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]: dispatch
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: pgmap v26: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: Detected new or changed devices on vm06
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: Adjusting osd_memory_target on vm06 to 257.0M
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: Unable to set osd_memory_target on vm06 to 269536460: error parsing value: Value '269536460' is below minimum 939524096
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:19:46.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:46 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a'
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: osdmap e11: 2 total, 1 up, 2 in
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='client.24145 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:47.622 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:47 vm08 ceph-mon[55906]: from='osd.1 ' entity='osd.1'
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: osdmap e11: 2 total, 1 up, 2 in
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='client.24145 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:19:47.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:19:47.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:47.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:47 vm03 ceph-mon[50786]: from='osd.1 ' entity='osd.1'
2026-03-07T10:19:47.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: osdmap e11: 2 total, 1 up, 2 in
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='client.24145 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:19:47.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:47 vm06 ceph-mon[56197]: from='osd.1 ' entity='osd.1'
2026-03-07T10:19:47.718 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:19:47 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1[59855]: 2026-03-07T10:19:47.256+0000 7fdcd64d1640 -1 osd.1 0 waiting for initial osdmap
2026-03-07T10:19:47.718 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:19:47 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1[59855]: 2026-03-07T10:19:47.263+0000 7fdcd12e7640 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: purged_snaps scrub starts
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: purged_snaps scrub ok
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: pgmap v28: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='client.? 192.168.123.108:0/1609347666' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]: dispatch
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]: dispatch
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]': finished
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929] boot
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: osdmap e12: 3 total, 2 up, 3 in
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-07T10:19:48.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:48 vm03 ceph-mon[50786]: from='client.? 192.168.123.108:0/2699902762' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-07T10:19:48.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: purged_snaps scrub starts
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: purged_snaps scrub ok
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: pgmap v28: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='client.? 192.168.123.108:0/1609347666' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]: dispatch
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]: dispatch
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]': finished
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929] boot
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: osdmap e12: 3 total, 2 up, 3 in
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-07T10:19:48.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:48 vm06 ceph-mon[56197]: from='client.? 
192.168.123.108:0/2699902762' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: purged_snaps scrub starts 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: purged_snaps scrub ok 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: pgmap v28: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='client.? 192.168.123.108:0/1609347666' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]: dispatch 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]: dispatch 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "edd117d5-6f93-435c-84df-1764f760e9c5"}]': finished 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: osd.1 [v2:192.168.123.106:6800/1330210929,v1:192.168.123.106:6801/1330210929] boot 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: osdmap e12: 3 total, 2 up, 3 in 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:48.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:48 vm08 ceph-mon[55906]: from='client.? 
192.168.123.108:0/2699902762' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:19:50.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:50 vm03 ceph-mon[50786]: pgmap v30: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:50.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:50 vm03 ceph-mon[50786]: osdmap e13: 3 total, 2 up, 3 in 2026-03-07T10:19:50.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:50 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:50.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:50 vm06 ceph-mon[56197]: pgmap v30: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:50.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:50 vm06 ceph-mon[56197]: osdmap e13: 3 total, 2 up, 3 in 2026-03-07T10:19:50.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:50 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:50.729 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:50 vm08 ceph-mon[55906]: pgmap v30: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:50.729 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:50 vm08 ceph-mon[55906]: osdmap e13: 3 total, 2 up, 3 in 2026-03-07T10:19:50.729 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:50 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:52.377 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:52 vm08 ceph-mon[55906]: pgmap v32: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:52.377 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:52 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:19:52.377 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:52 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:52.377 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:52 vm08 ceph-mon[55906]: Deploying daemon osd.2 on vm08 2026-03-07T10:19:52.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:52 vm03 ceph-mon[50786]: pgmap v32: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:52.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:52 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:19:52.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:52 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:52.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:52 vm03 ceph-mon[50786]: Deploying daemon osd.2 on vm08 2026-03-07T10:19:52.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:52 vm06 ceph-mon[56197]: pgmap v32: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:52.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:52 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:19:52.718 
INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:52 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:52.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:52 vm06 ceph-mon[56197]: Deploying daemon osd.2 on vm08 2026-03-07T10:19:54.600 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:54 vm08 ceph-mon[55906]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:54.600 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:54 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:54.600 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:54 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:54.600 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:54 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:54.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:54 vm03 ceph-mon[50786]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:54.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:54 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:54.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:54 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:54.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:54 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:54.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:54 vm06 ceph-mon[56197]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:54.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:54 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:54.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:54 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:54.717 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:54 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.654 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 2 on host 'vm08' 2026-03-07T10:19:55.729 DEBUG:teuthology.orchestra.run.vm08:osd.2> sudo journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.2.service 2026-03-07T10:19:55.731 INFO:tasks.cephadm:Waiting for 3 OSDs to come up... 
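
The "Waiting for 3 OSDs to come up..." step that follows polls the cluster by shelling out to cephadm (the DEBUG:teuthology.orchestra.run.vm03 lines below) and parsing the JSON from "ceph osd stat -f json" until num_up_osds reaches the expected count. A minimal sketch of that polling pattern in Python — wait_for_osds_up and its parameters are illustrative names, not teuthology's actual API:

    import json
    import subprocess
    import time

    def wait_for_osds_up(expected, fsid, image, timeout=300, interval=1):
        # Same command line as the DEBUG lines below:
        #   cephadm --image <image> shell --fsid <fsid> -- ceph osd stat -f json
        cmd = [
            'sudo', '/home/ubuntu/cephtest/cephadm', '--image', image,
            'shell', '--fsid', fsid, '--',
            'ceph', 'osd', 'stat', '-f', 'json',
        ]
        deadline = time.time() + timeout
        while time.time() < deadline:
            # stdout looks like the captures below, e.g.
            # {"epoch":16,"num_osds":3,"num_up_osds":3,...}
            stat = json.loads(subprocess.check_output(cmd))
            if stat['num_up_osds'] >= expected:
                return stat
            time.sleep(interval)
        raise TimeoutError('timed out waiting for %d OSDs to come up' % expected)

In this run the loop converges quickly: the first poll (issued 10:19:55) still reports num_up_osds 2 at osdmap epoch 14, and the third poll, returning at 10:19:59, sees all three up at epoch 16, after which the task proceeds to the osd dump and .mgr pool checks before setting up client nodes.
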
2026-03-07T10:19:55.731 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd stat -f json 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:55.855 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.856 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:55 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.890 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='osd.2 
[v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.942 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:55 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:19:55.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:55.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:55 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:56.194 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:19:56.238 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":14,"num_osds":3,"num_up_osds":2,"osd_up_since":1772878787,"num_in_osds":3,"osd_in_since":1772878787,"num_remapped_pgs":0} 2026-03-07T10:19:56.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: pgmap v34: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:56.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": 
["2"]}]': finished 2026-03-07T10:19:56.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: osdmap e14: 3 total, 2 up, 3 in 2026-03-07T10:19:56.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: from='osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-07T10:19:56.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:56.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-07T10:19:56.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:56 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/1737187730' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: pgmap v34: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: osdmap e14: 3 total, 2 up, 3 in 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: from='osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-07T10:19:57.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:56 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/1737187730' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: pgmap v34: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: osdmap e14: 3 total, 2 up, 3 in 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: from='osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-07T10:19:57.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:56 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/1737187730' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:57.238 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd stat -f json 2026-03-07T10:19:57.406 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:19:57.747 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: osdmap e15: 3 total, 2 up, 3 in 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: Detected new or changed devices on vm08 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: 
from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.2", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: Adjusting osd_memory_target on vm08 to 4353M 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.774 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:57 vm03 ceph-mon[50786]: from='osd.2 ' entity='osd.2' 2026-03-07T10:19:57.793 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":15,"num_osds":3,"num_up_osds":2,"osd_up_since":1772878787,"num_in_osds":3,"osd_in_since":1772878787,"num_remapped_pgs":0} 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: osdmap e15: 3 total, 2 up, 3 in 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: Detected new or changed devices on vm08 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.2", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: Adjusting osd_memory_target on vm08 to 4353M 2026-03-07T10:19:57.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:57.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: 
from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:57.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:57.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:57 vm06 ceph-mon[56197]: from='osd.2 ' entity='osd.2' 2026-03-07T10:19:58.041 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:19:57 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2[59333]: 2026-03-07T10:19:57.661+0000 7ffa872f0640 -1 osd.2 0 waiting for initial osdmap 2026-03-07T10:19:58.041 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:19:57 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2[59333]: 2026-03-07T10:19:57.667+0000 7ffa82919640 -1 osd.2 15 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: osdmap e15: 3 total, 2 up, 3 in 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: Detected new or changed devices on vm08 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.2", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: Adjusting osd_memory_target on vm08 to 4353M 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' 2026-03-07T10:19:58.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:57 vm08 ceph-mon[55906]: from='osd.2 ' entity='osd.2' 2026-03-07T10:19:58.794 
DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd stat -f json 2026-03-07T10:19:58.949 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:19:58.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: purged_snaps scrub starts 2026-03-07T10:19:58.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: purged_snaps scrub ok 2026-03-07T10:19:58.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: pgmap v37: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:58.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:58.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2168380849' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:58.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224] boot 2026-03-07T10:19:58.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: osdmap e16: 3 total, 3 up, 3 in 2026-03-07T10:19:58.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:58 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: purged_snaps scrub starts 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: purged_snaps scrub ok 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: pgmap v37: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/2168380849' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224] boot 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: osdmap e16: 3 total, 3 up, 3 in 2026-03-07T10:19:58.969 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:58 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:59.040 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: purged_snaps scrub starts 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: purged_snaps scrub ok 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: pgmap v37: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/2168380849' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: osd.2 [v2:192.168.123.108:6800/2135409224,v1:192.168.123.108:6801/2135409224] boot 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: osdmap e16: 3 total, 3 up, 3 in 2026-03-07T10:19:59.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:58 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:19:59.251 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:19:59.319 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":16,"num_osds":3,"num_up_osds":3,"osd_up_since":1772878798,"num_in_osds":3,"osd_in_since":1772878787,"num_remapped_pgs":0} 2026-03-07T10:19:59.319 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd dump --format=json 2026-03-07T10:19:59.475 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:19:59.786 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:19:59.786 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:59 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/713403307' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:19:59.786 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:19:59 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:19:59.786 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":17,"fsid":"d33fdf60-1a0e-11f1-a719-83e365122cb4","created":"2026-03-07T10:18:09.708130+0000","modified":"2026-03-07T10:19:59.712234+0000","last_up_change":"2026-03-07T10:19:58.663771+0000","last_in_change":"2026-03-07T10:19:47.576546+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":8,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":3,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-07T10:19:59.371282+0000","flags":32769,"flags_names":"hashpspool,creating","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"17","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{},"read_balance":{"score_type":"Fair 
distribution","score_acting":3,"score_stable":3,"optimal_score":1,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6803","nonce":3001304104}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6805","nonce":3001304104}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6809","nonce":3001304104}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6807","nonce":3001304104}]},"public_addr":"192.168.123.103:6803/3001304104","cluster_addr":"192.168.123.103:6805/3001304104","heartbeat_back_addr":"192.168.123.103:6809/3001304104","heartbeat_front_addr":"192.168.123.103:6807/3001304104","state":["exists","up"]},{"osd":1,"uuid":"8654b7ef-7e65-47e2-8422-0002d4c5dc1e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6801","nonce":1330210929}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6803","nonce":1330210929}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6807","nonce":1330210929}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6805","nonce":1330210929}]},"public_addr":"192.168.123.106:6801/1330210929","cluster_addr":"192.168.123.106:6803/1330210929","heartbeat_back_addr":"192.168.123.106:6807/1330210929","heartbeat_front_addr":"192.168.123.106:6805/1330210929","state":["exists","up"]},{"osd":2,"uuid":"edd117d5-6f93-435c-84df-1764f760e9c5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6801","nonce":2135409224}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6803","nonce":2135409224}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6807","nonce":2135409224}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6805","nonce":2135409224}]},"public_addr":"192.168.123.108:6801/2135409224","cluster_addr":"192.168.123.108:6803/2135409224","heartbeat_back_addr":"192.168.123.108:6807/2135409224","heartbeat_front_addr":"192.168.123.108:6805/2135409224","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snap
s_scrub":"2026-03-07T10:19:35.175016+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:19:45.432836+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:19:56.677895+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:6800/1703727429":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/351669787":"2026-03-08T10:18:53.307513+0000","192.168.123.103:6801/1703727429":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/2348711816":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/754077168":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/1698969848":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/139778878":"2026-03-08T10:18:35.741960+0000","192.168.123.103:6801/1601854214":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/1964757504":"2026-03-08T10:18:53.307513+0000","192.168.123.103:6800/1601854214":"2026-03-08T10:18:35.741960+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:19:59.837 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-07T10:19:59.371282+0000', 'flags': 32769, 'flags_names': 'hashpspool,creating', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '17', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 3, 'score_stable': 3, 'optimal_score': 1, 'raw_score_acting': 3, 
'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-07T10:19:59.837 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd pool get .mgr pg_num 2026-03-07T10:19:59.995 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:00.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:59 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/713403307' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:20:00.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:19:59 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:20:00.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:59 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/713403307' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:20:00.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:19:59 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:20:00.305 INFO:teuthology.orchestra.run.vm03.stdout:pg_num: 1 2026-03-07T10:20:00.351 INFO:tasks.cephadm:Setting up client nodes... 2026-03-07T10:20:00.351 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-07T10:20:00.351 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-07T10:20:00.351 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph mgr dump --format=json 2026-03-07T10:20:00.506 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:00.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: pgmap v39: 0 pgs: ; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:00.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:20:00.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: osdmap e17: 3 total, 3 up, 3 in 2026-03-07T10:20:00.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:20:00.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/1176358211' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:00.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: overall HEALTH_OK 2026-03-07T10:20:00.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/2070073784' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-07T10:20:00.858 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66487]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vde 2026-03-07T10:20:00.858 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66487]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:20:00.858 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66487]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:20:00.858 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66487]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:20:00.858 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:20:00.917 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":14,"flags":0,"active_gid":14156,"active_name":"a","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":4055809175},{"type":"v1","addr":"192.168.123.103:6801","nonce":4055809175}]},"active_addr":"192.168.123.103:6801/4055809175","active_change":"2026-03-07T10:18:53.307769+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14214,"name":"b","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP 
server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 
= Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.103:8443/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":3,"active_clients":[{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":2583693061}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":4236289228}]},{"name":"volumes","addrvec":[{"type":"v2","addr":
"192.168.123.103:0","nonce":4017979671}]}]} 2026-03-07T10:20:00.919 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-07T10:20:00.919 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-07T10:20:00.919 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd dump --format=json 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: pgmap v39: 0 pgs: ; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: osdmap e17: 3 total, 3 up, 3 in 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/1176358211' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: overall HEALTH_OK 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/2070073784' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62903]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62903]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62903]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:20:01.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62903]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:20:01.041 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62900]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vde 2026-03-07T10:20:01.041 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62900]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:20:01.041 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62900]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:20:01.041 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:00 vm08 sudo[62900]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:20:01.078 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:01.114 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66491]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-07T10:20:01.114 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66491]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:20:01.115 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66491]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:20:01.115 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:00 vm03 sudo[66491]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: pgmap v39: 0 pgs: ; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: osdmap e17: 3 total, 3 up, 3 in 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/1176358211' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: overall HEALTH_OK 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2070073784' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63111]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63111]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63111]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:20:01.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63111]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:20:01.218 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63108]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vde 2026-03-07T10:20:01.218 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63108]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:20:01.218 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63108]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:20:01.218 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:00 vm06 sudo[63108]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:20:01.378 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:20:01.378 
INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":18,"fsid":"d33fdf60-1a0e-11f1-a719-83e365122cb4","created":"2026-03-07T10:18:09.708130+0000","modified":"2026-03-07T10:20:00.718215+0000","last_up_change":"2026-03-07T10:19:58.663771+0000","last_in_change":"2026-03-07T10:19:47.576546+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":8,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":3,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-07T10:19:59.371282+0000","flags":32769,"flags_names":"hashpspool,creating","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":3,"score_stable":3,"optimal_score":1,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6803","nonce":3001304104}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6805","nonce":3001304104}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6809","nonce":3001304104}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6807","nonce":3001304104}]},"public_addr":"192.168.123.103:6803/3001304104","cluster_addr":"192.168.123.103:6805/3001304104","heartbeat_back_addr":"192.168.123.103:6809/3001304104","heartbeat_front_addr":"192.168.123.103:6807/3001304104","state":["exists","up"]},{"osd":1,"uuid":"8654b7ef-7e65-47e2-8422-0002d4c5dc1e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":17,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6801","nonce":1330210929}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6803","nonce":1330210929}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6807","nonce":1330210929}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6805","nonce":1330210929}]},"public_addr":"192.168.123.106:6801/1330210929","cluster_addr":"192.168.123.106:6803/1330210929","heartbeat_back_addr":"192.168.123.106:6807/1330210929","heartbeat_front_addr":"192.168.123.106:6805/1330210929","state":["exists","up"]},{"osd":2,"uuid":"edd117d5-6f93-435c-84df-1764f760e9c5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6801","nonce":2135409224}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6803","nonce":2135409224}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6807","nonce":2135409224}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6805","nonce":2135409224}]},"public_addr":"192.168.123.108:6801/2135409224","cluster_addr":"192.168.123.108:6803/2135409224","heartbeat_back_addr":"192.168.123.108:6807/2135409224","heartbeat_front_addr":"192.168.123.108:6805/2135409224","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_sna
ps_scrub":"2026-03-07T10:19:35.175016+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:19:45.432836+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:19:56.677895+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:6800/1703727429":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/351669787":"2026-03-08T10:18:53.307513+0000","192.168.123.103:6801/1703727429":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/2348711816":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/754077168":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/1698969848":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/139778878":"2026-03-08T10:18:35.741960+0000","192.168.123.103:6801/1601854214":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/1964757504":"2026-03-08T10:18:53.307513+0000","192.168.123.103:6800/1601854214":"2026-03-08T10:18:35.741960+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:20:01.428 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-07T10:20:01.428 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd dump --format=json 2026-03-07T10:20:01.584 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:01.899 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-07T10:20:01.899 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: osdmap e18: 3 total, 3 up, 3 in 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/1931240083' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:01.900 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:01 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/1053399318' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:01.900 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:20:01.900 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":19,"fsid":"d33fdf60-1a0e-11f1-a719-83e365122cb4","created":"2026-03-07T10:18:09.708130+0000","modified":"2026-03-07T10:20:01.725896+0000","last_up_change":"2026-03-07T10:19:58.663771+0000","last_in_change":"2026-03-07T10:19:47.576546+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":8,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":3,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-07T10:19:59.371282+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":3,"score_stable":3,"optimal_score":1,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"d8ac3e4c-913c-4569-8ccb-8da4fe2e73a7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6803","nonce":3001304104}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6805","nonce":3001304104}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6809","nonce":3001304104}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":3001304104},{"type":"v1","addr":"192.168.123.103:6807","nonce":3001304104}]},"public_addr":"192.168.123.103:6803/3001304104","cluster_addr":"192.168.123.103:6805/3001304104","heartbeat_back_addr":"192.168.123.103:6809/3001304104","heartbeat_front_addr":"192.168.123.103:6807/3001304104","state":["exists","up"]},{"osd":1,"uuid":"8654b7ef-7e65-47e2-8422-0002d4c5dc1e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":17,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6801","nonce":1330210929}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6803","nonce":1330210929}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6807","nonce":1330210929}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":1330210929},{"type":"v1","addr":"192.168.123.106:6805","nonce":1330210929}]},"public_addr":"192.168.123.106:6801/1330210929","cluster_addr":"192.168.123.106:6803/1330210929","heartbeat_back_addr":"192.168.123.106:6807/1330210929","heartbeat_front_addr":"192.168.123.106:6805/1330210929","state":["exists","up"]},{"osd":2,"uuid":"edd117d5-6f93-435c-84df-1764f760e9c5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6801","nonce":2135409224}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6803","nonce":2135409224}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6807","nonce":2135409224}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":2135409224},{"type":"v1","addr":"192.168.123.108:6805","nonce":2135409224}]},"public_addr":"192.168.123.108:6801/2135409224","cluster_addr":"192.168.123.108:6803/2135409224","heartbeat_back_addr":"192.168.123.108:6807/2135409224","heartbeat_front_addr":"192.168.123.108:6805/2135409224","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_sna
ps_scrub":"2026-03-07T10:19:35.175016+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:19:45.432836+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:19:56.677895+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:6800/1703727429":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/351669787":"2026-03-08T10:18:53.307513+0000","192.168.123.103:6801/1703727429":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/2348711816":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/754077168":"2026-03-08T10:18:53.307513+0000","192.168.123.103:0/1698969848":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/139778878":"2026-03-08T10:18:35.741960+0000","192.168.123.103:6801/1601854214":"2026-03-08T10:18:35.741960+0000","192.168.123.103:0/1964757504":"2026-03-08T10:18:53.307513+0000","192.168.123.103:6800/1601854214":"2026-03-08T10:18:35.741960+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:20:01.947 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph tell osd.0 flush_pg_stats 2026-03-07T10:20:01.947 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph tell osd.1 flush_pg_stats 2026-03-07T10:20:01.947 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph tell osd.2 flush_pg_stats 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: osdmap e18: 3 total, 3 up, 3 in 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/1931240083' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:02.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:01 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/1053399318' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:02.170 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:02.196 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: osdmap e18: 3 total, 3 up, 3 in 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/1931240083' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: 
from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='mgr.14156 192.168.123.103:0/1738756644' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:20:02.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:01 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/1053399318' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:02.297 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:02.559 INFO:teuthology.orchestra.run.vm03.stdout:68719476738 2026-03-07T10:20:02.559 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd last-stat-seq osd.2 2026-03-07T10:20:02.673 INFO:teuthology.orchestra.run.vm03.stdout:34359738375 2026-03-07T10:20:02.673 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd last-stat-seq osd.0 2026-03-07T10:20:02.716 INFO:teuthology.orchestra.run.vm03.stdout:51539607556 2026-03-07T10:20:02.716 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd last-stat-seq osd.1 2026-03-07T10:20:02.788 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:02.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:02 vm03 ceph-mon[50786]: pgmap v42: 1 pgs: 1 unknown; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:02.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:02 vm03 ceph-mon[50786]: osdmap e19: 3 total, 3 up, 3 in 2026-03-07T10:20:02.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:02 vm03 ceph-mon[50786]: mgrmap e15: a(active, since 68s), standbys: b 2026-03-07T10:20:02.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:02 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/2522538153' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:02.952 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:03.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:02 vm08 ceph-mon[55906]: pgmap v42: 1 pgs: 1 unknown; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:03.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:02 vm08 ceph-mon[55906]: osdmap e19: 3 total, 3 up, 3 in 2026-03-07T10:20:03.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:02 vm08 ceph-mon[55906]: mgrmap e15: a(active, since 68s), standbys: b 2026-03-07T10:20:03.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:02 vm08 ceph-mon[55906]: from='client.? 
192.168.123.103:0/2522538153' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:03.071 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:03.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:02 vm06 ceph-mon[56197]: pgmap v42: 1 pgs: 1 unknown; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:03.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:02 vm06 ceph-mon[56197]: osdmap e19: 3 total, 3 up, 3 in 2026-03-07T10:20:03.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:02 vm06 ceph-mon[56197]: mgrmap e15: a(active, since 68s), standbys: b 2026-03-07T10:20:03.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:02 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2522538153' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:20:03.265 INFO:teuthology.orchestra.run.vm03.stdout:68719476737 2026-03-07T10:20:03.325 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476738 got 68719476737 for osd.2 2026-03-07T10:20:03.389 INFO:teuthology.orchestra.run.vm03.stdout:34359738373 2026-03-07T10:20:03.438 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738375 got 34359738373 for osd.0 2026-03-07T10:20:03.480 INFO:teuthology.orchestra.run.vm03.stdout:51539607555 2026-03-07T10:20:03.523 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607556 got 51539607555 for osd.1 2026-03-07T10:20:04.040 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:03 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/947283960' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:20:04.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:03 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3614302685' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:20:04.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:03 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/1653704540' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:20:04.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:03 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/947283960' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:20:04.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:03 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3614302685' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:20:04.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:03 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/1653704540' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:20:04.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:03 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/947283960' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:20:04.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:03 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3614302685' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:20:04.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:03 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/1653704540' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:20:04.325 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd last-stat-seq osd.2 2026-03-07T10:20:04.439 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd last-stat-seq osd.0 2026-03-07T10:20:04.474 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:04.524 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph osd last-stat-seq osd.1 2026-03-07T10:20:04.751 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:04.756 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:04.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:04 vm03 ceph-mon[50786]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 481 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:04.883 INFO:teuthology.orchestra.run.vm03.stdout:68719476738 2026-03-07T10:20:04.939 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476738 got 68719476738 for osd.2 2026-03-07T10:20:04.940 DEBUG:teuthology.parallel:result is None 2026-03-07T10:20:05.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:04 vm08 ceph-mon[55906]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 481 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:05.120 INFO:teuthology.orchestra.run.vm03.stdout:34359738375 2026-03-07T10:20:05.167 INFO:teuthology.orchestra.run.vm03.stdout:51539607557 2026-03-07T10:20:05.170 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738375 got 34359738375 for osd.0 2026-03-07T10:20:05.170 DEBUG:teuthology.parallel:result is None 2026-03-07T10:20:05.212 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607556 got 51539607557 for osd.1 2026-03-07T10:20:05.212 DEBUG:teuthology.parallel:result is None 2026-03-07T10:20:05.212 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-07T10:20:05.212 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph pg dump --format=json 2026-03-07T10:20:05.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:04 vm06 ceph-mon[56197]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 481 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:05.374 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:05.674 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:20:05.674 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-07T10:20:05.736 
INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":45,"stamp":"2026-03-07T10:20:05.323808+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":2,"num_osds":3,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":62902272,"kb_used":492360,"kb_used_data":1836,"kb_used_omap":4,"kb_used_meta":80443,"kb_avail":62409912,"statfs":{"total":64411926528,"available":63907749888,"internally_reserved":0,"allocated":1880064,"data_stored":1528524,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":4770,"internal_metadata":82373982},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"2.000243"},"pg_stats":[{"pgid":"1.0","version":"18'32","reported_seq":57,"reported_epoch":19,"state":"active+clean","last_fresh":"2026-03-07T10:20:01.740174+0000","last_change":"2026-03-07T10:20:00.730881+0000","last_active":"202
6-03-07T10:20:01.740174+0000","last_peered":"2026-03-07T10:20:01.740174+0000","last_clean":"2026-03-07T10:20:01.740174+0000","last_became_active":"2026-03-07T10:20:00.730632+0000","last_became_peered":"2026-03-07T10:20:00.730632+0000","last_unstale":"2026-03-07T10:20:01.740174+0000","last_undegraded":"2026-03-07T10:20:01.740174+0000","last_fullsized":"2026-03-07T10:20:01.740174+0000","mapping_epoch":17,"log_start":"0'0","ondisk_log_start":"0'0","created":17,"last_epoch_clean":18,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-07T10:19:59.712234+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-07T10:19:59.712234+0000","last_clean_scrub_stamp":"2026-03-07T10:19:59.712234+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T19:34:01.027843+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[1,2,0],"acting":[1,2,0],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":1,"acting_primary":1,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":2,"up_from":16,"seq":68719476739,"num_pgs":1,"num_osds":1,"num_per
_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":437188,"kb_used_data":612,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530236,"statfs":{"total":21470642176,"available":21022961664,"internally_reserved":0,"allocated":626688,"data_stored":509508,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[]},{"osd":1,"up_from":12,"seq":51539607557,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27584,"kb_used_data":612,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939840,"statfs":{"total":21470642176,"available":21442396160,"internally_reserved":0,"allocated":626688,"data_stored":509508,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738375,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27588,"kb_used_data":612,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939836,"statfs":{"total":21470642176,"available":21442392064,"internally_reserved":0,"allocated":626688,"data_stored":509508,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-07T10:20:05.736 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph pg dump --format=json 2026-03-07T10:20:05.900 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config 2026-03-07T10:20:05.935 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:05 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/2243643016' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:20:05.935 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:05 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/4035593975' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:20:05.935 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:05 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/1646485927' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:20:06.040 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:05 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/2243643016' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:20:06.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:05 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/4035593975' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:20:06.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:05 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/1646485927' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:20:06.195 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-07T10:20:06.195 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-07T10:20:06.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:05 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/2243643016' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:20:06.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:05 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/4035593975' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:20:06.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:05 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/1646485927' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:20:06.256 INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":45,"stamp":"2026-03-07T10:20:05.323808+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":2,"num_osds":3,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":62902272,"kb_used":492360,"kb_used_data":1836,"kb_used_omap":4,"kb_used_meta":80443,"kb_avail":62409912,"statfs":{"total":64411926528,"available":63907749888,"internally_reserved":0,"allocated":1880064,"data_stored":1528524,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":4770,"internal_metadata":82373982},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"2.000243"},"pg_stats":[{"pgid":"1.0","version":"18'32","reported_seq":57,"reported_epoch":19,"st
ate":"active+clean","last_fresh":"2026-03-07T10:20:01.740174+0000","last_change":"2026-03-07T10:20:00.730881+0000","last_active":"2026-03-07T10:20:01.740174+0000","last_peered":"2026-03-07T10:20:01.740174+0000","last_clean":"2026-03-07T10:20:01.740174+0000","last_became_active":"2026-03-07T10:20:00.730632+0000","last_became_peered":"2026-03-07T10:20:00.730632+0000","last_unstale":"2026-03-07T10:20:01.740174+0000","last_undegraded":"2026-03-07T10:20:01.740174+0000","last_fullsized":"2026-03-07T10:20:01.740174+0000","mapping_epoch":17,"log_start":"0'0","ondisk_log_start":"0'0","created":17,"last_epoch_clean":18,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-07T10:19:59.712234+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-07T10:19:59.712234+0000","last_clean_scrub_stamp":"2026-03-07T10:19:59.712234+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T19:34:01.027843+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[1,2,0],"acting":[1,2,0],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":1,"acting_primary":1,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_si
ze":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":2,"up_from":16,"seq":68719476739,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":437188,"kb_used_data":612,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530236,"statfs":{"total":21470642176,"available":21022961664,"internally_reserved":0,"allocated":626688,"data_stored":509508,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[]},{"osd":1,"up_from":12,"seq":51539607557,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27584,"kb_used_data":612,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939840,"statfs":{"total":21470642176,"available":21442396160,"internally_reserved":0,"allocated":626688,"data_stored":509508,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738375,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27588,"kb_used_data":612,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939836,"statfs":{"total":21470642176,"available":21442392064,"internally_reserved":0,"allocated":626688,"data_stored":509508,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-07T10:20:06.257 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-07T10:20:06.257 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-03-07T10:20:06.257 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy
2026-03-07T10:20:06.257 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph health --format=json
2026-03-07T10:20:06.407 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:20:06.708 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-07T10:20:06.708 INFO:teuthology.orchestra.run.vm03.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]}
2026-03-07T10:20:06.751 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done
2026-03-07T10:20:06.751 INFO:tasks.cephadm:Setup complete, yielding
2026-03-07T10:20:06.751 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-07T10:20:06.753 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local
2026-03-07T10:20:06.753 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- bash -c 'ceph mgr module enable rgw'
2026-03-07T10:20:06.900 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:20:06.960 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:06 vm03 ceph-mon[50786]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 481 MiB used, 60 GiB / 60 GiB avail
2026-03-07T10:20:06.960 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:06 vm03 ceph-mon[50786]: from='client.14391 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:20:06.960 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:06 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/1809018977' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch
2026-03-07T10:20:07.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:06 vm08 ceph-mon[55906]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 481 MiB used, 60 GiB / 60 GiB avail
2026-03-07T10:20:07.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:06 vm08 ceph-mon[55906]: from='client.14391 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:20:07.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:06 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/1809018977' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch
2026-03-07T10:20:07.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:06 vm06 ceph-mon[56197]: pgmap v45: 1 pgs: 1 active+clean; 449 KiB data, 481 MiB used, 60 GiB / 60 GiB avail
2026-03-07T10:20:07.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:06 vm06 ceph-mon[56197]: from='client.14391 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:20:07.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:06 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/1809018977' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch
2026-03-07T10:20:07.857 INFO:teuthology.run_tasks:Running task rgw_module.apply...
2026-03-07T10:20:07.860 INFO:tasks.rgw_module:Applying spec(s): rgw_realm: myrealm1 rgw_zone: myzone1 rgw_zonegroup: myzonegroup1 spec: rgw_frontend_port: 5500
2026-03-07T10:20:07.860 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- ceph rgw realm bootstrap -i -
2026-03-07T10:20:08.037 INFO:teuthology.orchestra.run.vm03.stdout:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:20:08.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:07 vm08 ceph-mon[55906]: from='client.24259 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:20:08.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:07 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3702202491' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "rgw"}]: dispatch
2026-03-07T10:20:08.059 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:07 vm06 ceph-mon[56197]: from='client.24259 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:20:08.059 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:07 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3702202491' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "rgw"}]: dispatch
2026-03-07T10:20:08.059 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:07 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: ignoring --setuser ceph since I am not root
2026-03-07T10:20:08.059 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:07 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: ignoring --setgroup ceph since I am not root
2026-03-07T10:20:08.059 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:08 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:08.057+0000 7f20d141f100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member
2026-03-07T10:20:08.066 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:07 vm03 ceph-mon[50786]: from='client.24259 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-07T10:20:08.066 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:07 vm03 ceph-mon[50786]: from='client.?
192.168.123.103:0/3702202491' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "rgw"}]: dispatch 2026-03-07T10:20:08.066 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:07 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: ignoring --setuser ceph since I am not root 2026-03-07T10:20:08.066 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:07 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: ignoring --setgroup ceph since I am not root 2026-03-07T10:20:08.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:08 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:08.070+0000 7fce1a135100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-07T10:20:08.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:08 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:08.205+0000 7fce1a135100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:20:08.468 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:08 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:08.182+0000 7f20d141f100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:20:09.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:08 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3702202491' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "rgw"}]': finished 2026-03-07T10:20:09.041 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:08 vm08 ceph-mon[55906]: mgrmap e16: a(active, since 74s), standbys: b 2026-03-07T10:20:09.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:08 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3702202491' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "rgw"}]': finished 2026-03-07T10:20:09.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:08 vm03 ceph-mon[50786]: mgrmap e16: a(active, since 74s), standbys: b 2026-03-07T10:20:09.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:08 vm06 ceph-mon[56197]: from='client.? 
192.168.123.103:0/3702202491' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "rgw"}]': finished 2026-03-07T10:20:09.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:08 vm06 ceph-mon[56197]: mgrmap e16: a(active, since 74s), standbys: b 2026-03-07T10:20:09.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:09 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:09.254+0000 7fce1a135100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:20:09.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:09 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:09.237+0000 7f20d141f100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:20:10.403 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:10 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:10.051+0000 7f20d141f100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:20:10.403 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:10 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:10.168+0000 7f20d141f100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:20:10.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:10 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:10.062+0000 7fce1a135100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:20:10.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:10 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:10.179+0000 7fce1a135100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:20:10.717 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:10 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:10.400+0000 7f20d141f100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:20:10.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:10 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:10.409+0000 7fce1a135100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:20:12.412 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:12 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:12.131+0000 7fce1a135100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:20:12.454 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:12 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:12.132+0000 7f20d141f100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:20:12.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:12 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:12.452+0000 7f20d141f100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:20:12.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:12 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:12.581+0000 7f20d141f100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:20:12.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:12 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:12.695+0000 7f20d141f100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-07T10:20:12.828 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:12 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 
2026-03-07T10:20:12.450+0000 7fce1a135100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:20:12.828 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:12 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:12.579+0000 7fce1a135100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:20:12.828 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:12 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:12.694+0000 7fce1a135100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-07T10:20:13.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:12 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:12.825+0000 7fce1a135100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:20:13.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:12 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:12.941+0000 7fce1a135100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:20:13.218 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:12 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:12.827+0000 7f20d141f100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:20:13.218 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:12 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:12.944+0000 7f20d141f100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:20:13.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:13 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:13.428+0000 7f20d141f100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:20:13.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:13 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:13.571+0000 7f20d141f100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:20:13.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:13 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:13.424+0000 7fce1a135100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:20:13.912 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:13 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:13.568+0000 7fce1a135100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:20:14.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:14 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:14.246+0000 7fce1a135100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:20:14.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:14 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:14.250+0000 7f20d141f100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:20:15.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:15 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:15.243+0000 7fce1a135100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:20:15.662 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:15 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:15.366+0000 7fce1a135100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:20:15.662 
INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:15 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:15.490+0000 7fce1a135100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:20:15.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:15 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:15.256+0000 7f20d141f100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:20:15.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:15 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:15.379+0000 7f20d141f100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:20:15.718 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:15 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:15.502+0000 7f20d141f100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:20:16.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:15 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:15.745+0000 7fce1a135100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:20:16.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:15 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:15.869+0000 7fce1a135100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:20:16.180 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:15 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:15.758+0000 7f20d141f100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:20:16.181 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:15 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:15.882+0000 7f20d141f100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:20:16.467 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:16 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:16.178+0000 7f20d141f100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:20:16.507 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:16 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:16.165+0000 7fce1a135100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:20:16.872 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:16 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:16.504+0000 7fce1a135100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:20:16.883 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:16 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:16.518+0000 7f20d141f100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:20:17.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:16 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:16.869+0000 7fce1a135100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:20:17.162 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:16 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a[50997]: 2026-03-07T10:20:16.988+0000 7fce1a135100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:20:17.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: Active manager daemon a restarted 2026-03-07T10:20:17.163 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: Activating manager daemon a 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: osdmap e20: 3 total, 3 up, 3 in 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: mgrmap e17: a(active, starting, since 0.00966827s), standbys: b 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: Standby manager daemon b restarted 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: Standby manager daemon b started 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.? 
192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:20:17.163 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:17 vm03 ceph-mon[50786]: Manager daemon a is now available 2026-03-07T10:20:17.217 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:16 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:16.881+0000 7f20d141f100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-b[57128]: 2026-03-07T10:20:17.001+0000 7f20d141f100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: Active manager daemon a restarted 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: Activating manager daemon a 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: osdmap e20: 3 total, 3 up, 3 in 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: mgrmap e17: a(active, starting, since 0.00966827s), standbys: b 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.? 
192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: Standby manager daemon b restarted 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: Standby manager daemon b started 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:20:17.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:17 vm06 ceph-mon[56197]: Manager daemon a is now available 2026-03-07T10:20:17.370 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: Active manager daemon a restarted 2026-03-07T10:20:17.370 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 
10:20:17 vm08 ceph-mon[55906]: Activating manager daemon a 2026-03-07T10:20:17.370 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: osdmap e20: 3 total, 3 up, 3 in 2026-03-07T10:20:17.370 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: mgrmap e17: a(active, starting, since 0.00966827s), standbys: b 2026-03-07T10:20:17.370 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch 2026-03-07T10:20:17.370 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.? 192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: Standby manager daemon b restarted 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: Standby manager daemon b started 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.? 
192.168.123.106:0/3633356432' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:20:17.371 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:17 vm08 ceph-mon[55906]: Manager daemon a is now available 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.291 
INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:18 vm08 ceph-mon[55906]: mgrmap e18: a(active, since 1.01858s), standbys: b 2026-03-07T10:20:18.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:20:18.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:20:18.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:18.413 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:18 vm03 ceph-mon[50786]: mgrmap e18: a(active, since 1.01858s), standbys: b 2026-03-07T10:20:18.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' 
entity='mgr.a'
2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:18.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:18 vm06 ceph-mon[56197]: mgrmap e18: a(active, since 1.01858s), standbys: b
2026-03-07T10:20:19.662 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Adjusting osd_memory_target on vm06 to 257.0M
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Unable to set osd_memory_target on vm06 to 269536460: error parsing value: Value '269536460' is below minimum 939524096
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Adjusting osd_memory_target on vm03 to 257.0M
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Unable to set osd_memory_target on vm03 to 269533593: error parsing value: Value '269533593' is below minimum 939524096
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm03:/etc/ceph/ceph.conf
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm06:/etc/ceph/ceph.conf
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm08:/etc/ceph/ceph.conf
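Note: the repeated "Adjusting osd_memory_target ... / Unable to set osd_memory_target ..." pairs above come from cephadm's OSD memory autotuner. On these small VPS nodes it computes a per-OSD target of roughly 257 MiB, which is below the hard minimum of 939524096 bytes (896 MiB) that osd_memory_target accepts, so the config write is rejected and the pair of messages recurs on every tuning pass. A minimal sketch of how one could quiet this on undersized test nodes, assuming the standard cephadm autotune knobs (this job does not configure either):

    # Sketch, assuming stock cephadm memory-autotune options.
    # Either turn per-host OSD memory autotuning off entirely...
    ceph config set osd osd_memory_target_autotune false
    # ...or lower the share of host RAM the autotuner divides among OSDs
    # (mgr/cephadm/autotune_memory_target_ratio defaults to 0.7).
    ceph config set mgr mgr/cephadm/autotune_memory_target_ratio 0.2

With either setting the mon stops logging the below-minimum rejections; the OSDs then run with the default osd_memory_target rather than an autotuned one.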
2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: osdmap e21: 3 total, 3 up, 3 in 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3543686439' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='client.? 
' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:19.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:19 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Adjusting osd_memory_target on vm06 to 257.0M 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Unable to set osd_memory_target on vm06 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Adjusting osd_memory_target on vm03 to 257.0M 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Unable to set osd_memory_target on vm03 to 
269533593: error parsing value: Value '269533593' is below minimum 939524096 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm03:/etc/ceph/ceph.conf 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm06:/etc/ceph/ceph.conf 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm08:/etc/ceph/ceph.conf 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: osdmap e21: 3 total, 3 up, 3 in 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3543686439' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='client.? 
' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:19.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:19 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Adjusting osd_memory_target on vm06 to 257.0M 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Unable to set osd_memory_target on vm06 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Adjusting osd_memory_target on vm03 to 257.0M 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Unable to set osd_memory_target on vm03 to 
269533593: error parsing value: Value '269533593' is below minimum 939524096 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm03:/etc/ceph/ceph.conf 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm06:/etc/ceph/ceph.conf 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm08:/etc/ceph/ceph.conf 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.conf 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: osdmap e21: 3 total, 3 up, 3 in 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3543686439' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='client.? 
' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:19.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:19 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: [07/Mar/2026:10:20:18] ENGINE Bus STARTING 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: [07/Mar/2026:10:20:19] ENGINE Serving on http://192.168.123.103:8765 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: [07/Mar/2026:10:20:19] ENGINE Serving on https://192.168.123.103:7150 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: [07/Mar/2026:10:20:19] ENGINE Bus STARTED 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: [07/Mar/2026:10:20:19] ENGINE Client ('192.168.123.103', 36776) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: mgrmap e19: a(active, since 2s), standbys: b 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: from='client.? 
' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-07T10:20:20.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:20 vm08 ceph-mon[55906]: osdmap e22: 3 total, 3 up, 3 in 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: [07/Mar/2026:10:20:18] ENGINE Bus STARTING 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: [07/Mar/2026:10:20:19] ENGINE Serving on http://192.168.123.103:8765 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: [07/Mar/2026:10:20:19] ENGINE Serving on https://192.168.123.103:7150 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: [07/Mar/2026:10:20:19] ENGINE Bus STARTED 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: [07/Mar/2026:10:20:19] ENGINE Client ('192.168.123.103', 36776) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: mgrmap e19: a(active, since 2s), standbys: b 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: from='client.? 
' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-07T10:20:20.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:20 vm03 ceph-mon[50786]: osdmap e22: 3 total, 3 up, 3 in 2026-03-07T10:20:20.967 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: Updating vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: Updating vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: [07/Mar/2026:10:20:18] ENGINE Bus STARTING 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: Updating vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/config/ceph.client.admin.keyring 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: [07/Mar/2026:10:20:19] ENGINE Serving on http://192.168.123.103:8765 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: [07/Mar/2026:10:20:19] ENGINE Serving on https://192.168.123.103:7150 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: [07/Mar/2026:10:20:19] ENGINE Bus STARTED 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: [07/Mar/2026:10:20:19] ENGINE Client ('192.168.123.103', 36776) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: mgrmap e19: a(active, since 2s), standbys: b 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-07T10:20:20.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:20 vm06 ceph-mon[56197]: osdmap e22: 3 total, 3 up, 3 in 2026-03-07T10:20:22.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:22 vm03 ceph-mon[50786]: pgmap v7: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:22.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:22 vm03 ceph-mon[50786]: osdmap e23: 3 total, 3 up, 3 in 2026-03-07T10:20:22.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:22 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]: dispatch 2026-03-07T10:20:22.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:22 vm03 ceph-mon[50786]: from='client.? 
' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]: dispatch 2026-03-07T10:20:22.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:22 vm03 ceph-mon[50786]: mgrmap e20: a(active, since 4s), standbys: b 2026-03-07T10:20:22.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:22 vm06 ceph-mon[56197]: pgmap v7: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:22.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:22 vm06 ceph-mon[56197]: osdmap e23: 3 total, 3 up, 3 in 2026-03-07T10:20:22.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:22 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]: dispatch 2026-03-07T10:20:22.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:22 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]: dispatch 2026-03-07T10:20:22.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:22 vm06 ceph-mon[56197]: mgrmap e20: a(active, since 4s), standbys: b 2026-03-07T10:20:22.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:22 vm08 ceph-mon[55906]: pgmap v7: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:22.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:22 vm08 ceph-mon[55906]: osdmap e23: 3 total, 3 up, 3 in 2026-03-07T10:20:22.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:22 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]: dispatch 2026-03-07T10:20:22.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:22 vm08 ceph-mon[55906]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]: dispatch 2026-03-07T10:20:22.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:22 vm08 ceph-mon[55906]: mgrmap e20: a(active, since 4s), standbys: b 2026-03-07T10:20:23.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:23 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]': finished 2026-03-07T10:20:23.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:23 vm03 ceph-mon[50786]: osdmap e24: 3 total, 3 up, 3 in 2026-03-07T10:20:23.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:23 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]': finished 2026-03-07T10:20:23.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:23 vm06 ceph-mon[56197]: osdmap e24: 3 total, 3 up, 3 in 2026-03-07T10:20:23.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:23 vm08 ceph-mon[55906]: from='client.? 
' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.log","app": "rgw"}]': finished 2026-03-07T10:20:23.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:23 vm08 ceph-mon[55906]: osdmap e24: 3 total, 3 up, 3 in 2026-03-07T10:20:24.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:24 vm03 ceph-mon[50786]: pgmap v10: 65 pgs: 64 unknown, 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:24.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:24 vm03 ceph-mon[50786]: osdmap e25: 3 total, 3 up, 3 in 2026-03-07T10:20:24.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:24 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]: dispatch 2026-03-07T10:20:24.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:24 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]: dispatch 2026-03-07T10:20:24.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:24 vm06 ceph-mon[56197]: pgmap v10: 65 pgs: 64 unknown, 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:24.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:24 vm06 ceph-mon[56197]: osdmap e25: 3 total, 3 up, 3 in 2026-03-07T10:20:24.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:24 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]: dispatch 2026-03-07T10:20:24.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:24 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]: dispatch 2026-03-07T10:20:24.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:24 vm08 ceph-mon[55906]: pgmap v10: 65 pgs: 64 unknown, 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:20:24.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:24 vm08 ceph-mon[55906]: osdmap e25: 3 total, 3 up, 3 in 2026-03-07T10:20:24.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:24 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]: dispatch 2026-03-07T10:20:24.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:24 vm08 ceph-mon[55906]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]: dispatch 2026-03-07T10:20:25.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:25 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]': finished 2026-03-07T10:20:25.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:25 vm03 ceph-mon[50786]: osdmap e26: 3 total, 3 up, 3 in 2026-03-07T10:20:25.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:25 vm06 ceph-mon[56197]: from='client.? 
' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]': finished 2026-03-07T10:20:25.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:25 vm06 ceph-mon[56197]: osdmap e26: 3 total, 3 up, 3 in 2026-03-07T10:20:25.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:25 vm08 ceph-mon[55906]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.control","app": "rgw"}]': finished 2026-03-07T10:20:25.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:25 vm08 ceph-mon[55906]: osdmap e26: 3 total, 3 up, 3 in 2026-03-07T10:20:26.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:26 vm03 ceph-mon[50786]: pgmap v13: 97 pgs: 32 unknown, 65 active+clean; 451 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 51 KiB/s rd, 3.5 KiB/s wr, 31 op/s 2026-03-07T10:20:26.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:26 vm03 ceph-mon[50786]: osdmap e27: 3 total, 3 up, 3 in 2026-03-07T10:20:26.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:26 vm03 ceph-mon[50786]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]: dispatch 2026-03-07T10:20:26.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:26 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]: dispatch 2026-03-07T10:20:26.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:26 vm06 ceph-mon[56197]: pgmap v13: 97 pgs: 32 unknown, 65 active+clean; 451 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 51 KiB/s rd, 3.5 KiB/s wr, 31 op/s 2026-03-07T10:20:26.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:26 vm06 ceph-mon[56197]: osdmap e27: 3 total, 3 up, 3 in 2026-03-07T10:20:26.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:26 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]: dispatch 2026-03-07T10:20:26.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:26 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]: dispatch 2026-03-07T10:20:26.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:26 vm08 ceph-mon[55906]: pgmap v13: 97 pgs: 32 unknown, 65 active+clean; 451 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 51 KiB/s rd, 3.5 KiB/s wr, 31 op/s 2026-03-07T10:20:26.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:26 vm08 ceph-mon[55906]: osdmap e27: 3 total, 3 up, 3 in 2026-03-07T10:20:26.540 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:26 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]: dispatch 2026-03-07T10:20:26.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:26 vm08 ceph-mon[55906]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]: dispatch 2026-03-07T10:20:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:27 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]': finished 2026-03-07T10:20:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:27 vm03 ceph-mon[50786]: from='client.? 
192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-07T10:20:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:27 vm03 ceph-mon[50786]: osdmap e28: 3 total, 3 up, 3 in 2026-03-07T10:20:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:27 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-07T10:20:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:27 vm03 ceph-mon[50786]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-07T10:20:27.412 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:27 vm03 ceph-mon[50786]: osdmap e29: 3 total, 3 up, 3 in 2026-03-07T10:20:27.464 INFO:teuthology.orchestra.run.vm03.stdout:Realm(s) created correctly. Please, use 'ceph rgw realm tokens' to get the token. 2026-03-07T10:20:27.467 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:27 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]': finished 2026-03-07T10:20:27.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:27 vm06 ceph-mon[56197]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-07T10:20:27.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:27 vm06 ceph-mon[56197]: osdmap e28: 3 total, 3 up, 3 in 2026-03-07T10:20:27.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:27 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-07T10:20:27.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:27 vm06 ceph-mon[56197]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-07T10:20:27.468 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:27 vm06 ceph-mon[56197]: osdmap e29: 3 total, 3 up, 3 in 2026-03-07T10:20:27.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:27 vm08 ceph-mon[55906]: from='client.? ' entity='mgr.a' cmd='[{"prefix": "osd pool application enable","pool": "myzone1.rgw.meta","app": "rgw"}]': finished 2026-03-07T10:20:27.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:27 vm08 ceph-mon[55906]: from='client.? 192.168.123.103:0/3830983860' entity='mgr.a' cmd=[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-07T10:20:27.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:27 vm08 ceph-mon[55906]: osdmap e28: 3 total, 3 up, 3 in 2026-03-07T10:20:27.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:27 vm08 ceph-mon[55906]: from='client.? ' entity='mgr.a' cmd=[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-07T10:20:27.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:27 vm08 ceph-mon[55906]: from='client.? 
' entity='mgr.a' cmd='[{"prefix": "osd pool set", "pool": "myzone1.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-03-07T10:20:27.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:27 vm08 ceph-mon[55906]: osdmap e29: 3 total, 3 up, 3 in
2026-03-07T10:20:27.550 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-07T10:20:27.552 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 -- bash -c 'set -e
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> set -x
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> while true; do TOKEN=$(ceph rgw realm tokens | jq -r '"'"'.[0].token'"'"'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> TOKENS=$(ceph rgw realm tokens)
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKENS | jq --exit-status '"'"'.[0].realm == "myrealm1"'"'"'
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKENS | jq --exit-status '"'"'.[0].token'"'"'
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> TOKEN_JSON=$(ceph rgw realm tokens | jq -r '"'"'.[0].token'"'"' | base64 --decode)
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKEN_JSON | jq --exit-status '"'"'.realm_name == "myrealm1"'"'"'
2026-03-07T10:20:27.552 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKEN_JSON | jq --exit-status '"'"'.endpoint | test("http://.+:\\d+")'"'"'
2026-03-07T10:20:27.553 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKEN_JSON | jq --exit-status '"'"'.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")'"'"'
2026-03-07T10:20:27.553 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKEN_JSON | jq --exit-status '"'"'.access_key'"'"'
2026-03-07T10:20:27.553 DEBUG:teuthology.orchestra.run.vm03:> echo $TOKEN_JSON | jq --exit-status '"'"'.secret'"'"'
2026-03-07T10:20:27.553 DEBUG:teuthology.orchestra.run.vm03:> '
2026-03-07T10:20:27.733 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/mon.a/config
2026-03-07T10:20:27.811 INFO:teuthology.orchestra.run.vm03.stderr:+ true
2026-03-07T10:20:27.811 INFO:teuthology.orchestra.run.vm03.stderr:++ ceph rgw realm tokens
2026-03-07T10:20:27.813 INFO:teuthology.orchestra.run.vm03.stderr:++ jq -r '.[0].token'
2026-03-07T10:20:28.633 INFO:teuthology.orchestra.run.vm03.stderr:+ TOKEN='master zone has no endpoint'
2026-03-07T10:20:28.634 INFO:teuthology.orchestra.run.vm03.stderr:+ echo master zone has no endpoint
2026-03-07T10:20:28.634 INFO:teuthology.orchestra.run.vm03.stdout:master zone has no endpoint
2026-03-07T10:20:28.634 INFO:teuthology.orchestra.run.vm03.stderr:+ '[' 'master zone has no endpoint' '!=' 'master zone has no endpoint' ']'
2026-03-07T10:20:28.634 INFO:teuthology.orchestra.run.vm03.stderr:+ sleep 5
2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: pgmap v16: 129 pgs: 64 unknown, 65 active+clean; 451 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 51 KiB/s rd, 3.5 KiB/s wr, 31 op/s
2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06
ceph-mon[56197]: Saving service rgw.myrealm1.myzone1 spec with placement count:2 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm06.kgepvx", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm06.kgepvx", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: Deploying daemon rgw.myrealm1.myzone1.vm06.kgepvx on vm06 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm08.rkfwlo", "caps": ["mon", "allow 
*", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm08.rkfwlo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:28 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: pgmap v16: 129 pgs: 64 unknown, 65 active+clean; 451 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 51 KiB/s rd, 3.5 KiB/s wr, 31 op/s 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: Saving service rgw.myrealm1.myzone1 spec with placement count:2 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm06.kgepvx", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm06.kgepvx", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 
INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: Deploying daemon rgw.myrealm1.myzone1.vm06.kgepvx on vm06 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.776 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm08.rkfwlo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-07T10:20:28.777 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm08.rkfwlo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-07T10:20:28.777 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.777 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:28 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: pgmap v16: 129 pgs: 64 unknown, 65 active+clean; 451 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 51 KiB/s rd, 3.5 KiB/s wr, 31 op/s 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: Saving service rgw.myrealm1.myzone1 spec with placement count:2 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 
ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm06.kgepvx", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm06.kgepvx", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: Deploying daemon rgw.myrealm1.myzone1.vm06.kgepvx on vm06 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm08.rkfwlo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.myrealm1.myzone1.vm08.rkfwlo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:28.913 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:28 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:29.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:29 vm08 ceph-mon[55906]: from='client.14634 -' entity='client.admin' cmd=[{"prefix": "rgw realm tokens", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:20:29.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:29 vm08 ceph-mon[55906]: Deploying daemon rgw.myrealm1.myzone1.vm08.rkfwlo on vm08 2026-03-07T10:20:29.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:29 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' 
entity='mgr.a' 2026-03-07T10:20:29.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:29 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:29 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:29 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "rgw zone modify", "realm_name": "myrealm1", "zonegroup_name": "myzonegroup1", "zone_name": "myzone1", "realm_token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiBudWxsLAogICAgImFjY2Vzc19rZXkiOiAiMEs4TlRSVVcwRkJFOVMwVlRKNFkiLAogICAgInNlY3JldCI6ICJvSkxvbGF1eTY0WVFTdTFMZGc4RDNsdDBFd0JuZG5PV2t0UnhBdjZQIgp9", "zone_endpoints": ["http://192.168.123.106:5500", "http://192.168.123.108:5500"]}]: dispatch 2026-03-07T10:20:29.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:29 vm03 ceph-mon[50786]: from='client.14634 -' entity='client.admin' cmd=[{"prefix": "rgw realm tokens", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:20:29.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:29 vm03 ceph-mon[50786]: Deploying daemon rgw.myrealm1.myzone1.vm08.rkfwlo on vm08 2026-03-07T10:20:29.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:29 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:29 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:29 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:29 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "rgw zone modify", "realm_name": "myrealm1", "zonegroup_name": "myzonegroup1", "zone_name": "myzone1", "realm_token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiBudWxsLAogICAgImFjY2Vzc19rZXkiOiAiMEs4TlRSVVcwRkJFOVMwVlRKNFkiLAogICAgInNlY3JldCI6ICJvSkxvbGF1eTY0WVFTdTFMZGc4RDNsdDBFd0JuZG5PV2t0UnhBdjZQIgp9", "zone_endpoints": ["http://192.168.123.106:5500", "http://192.168.123.108:5500"]}]: dispatch 2026-03-07T10:20:29.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:29 vm06 ceph-mon[56197]: from='client.14634 -' entity='client.admin' cmd=[{"prefix": "rgw realm tokens", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:20:29.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:29 vm06 ceph-mon[56197]: Deploying daemon rgw.myrealm1.myzone1.vm08.rkfwlo on vm08 2026-03-07T10:20:29.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:29 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:29 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:29 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:29.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:29 vm06 ceph-mon[56197]: from='mgr.24274 
192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "rgw zone modify", "realm_name": "myrealm1", "zonegroup_name": "myzonegroup1", "zone_name": "myzone1", "realm_token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiBudWxsLAogICAgImFjY2Vzc19rZXkiOiAiMEs4TlRSVVcwRkJFOVMwVlRKNFkiLAogICAgInNlY3JldCI6ICJvSkxvbGF1eTY0WVFTdTFMZGc4RDNsdDBFd0JuZG5PV2t0UnhBdjZQIgp9", "zone_endpoints": ["http://192.168.123.106:5500", "http://192.168.123.108:5500"]}]: dispatch 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: pgmap v18: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 5.1 KiB/s wr, 22 op/s 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "rgw zone modify", "realm_name": "myrealm1", "zonegroup_name": "myzonegroup1", "zone_name": "myzone1", "realm_token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiBudWxsLAogICAgImFjY2Vzc19rZXkiOiAiMEs4TlRSVVcwRkJFOVMwVlRKNFkiLAogICAgInNlY3JldCI6ICJvSkxvbGF1eTY0WVFTdTFMZGc4RDNsdDBFd0JuZG5PV2t0UnhBdjZQIgp9", "zone_endpoints": ["http://192.168.123.106:5500", "http://192.168.123.108:5500"]}]: dispatch 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.541 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:30 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: pgmap v18: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 5.1 KiB/s wr, 22 op/s 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "rgw zone modify", "realm_name": "myrealm1", "zonegroup_name": "myzonegroup1", "zone_name": "myzone1", "realm_token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiBudWxsLAogICAgImFjY2Vzc19rZXkiOiAiMEs4TlRSVVcwRkJFOVMwVlRKNFkiLAogICAgInNlY3JldCI6ICJvSkxvbGF1eTY0WVFTdTFMZGc4RDNsdDBFd0JuZG5PV2t0UnhBdjZQIgp9", "zone_endpoints": ["http://192.168.123.106:5500", "http://192.168.123.108:5500"]}]: dispatch 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.718 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:30 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: pgmap v18: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 16 KiB/s rd, 5.1 KiB/s wr, 22 op/s 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "rgw zone modify", "realm_name": "myrealm1", "zonegroup_name": "myzonegroup1", "zone_name": "myzone1", "realm_token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiBudWxsLAogICAgImFjY2Vzc19rZXkiOiAiMEs4TlRSVVcwRkJFOVMwVlRKNFkiLAogICAgInNlY3JldCI6ICJvSkxvbGF1eTY0WVFTdTFMZGc4RDNsdDBFd0JuZG5PV2t0UnhBdjZQIgp9", "zone_endpoints": ["http://192.168.123.106:5500", "http://192.168.123.108:5500"]}]: dispatch 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:30.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:30 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: Saving service rgw.myrealm1.myzone1 spec with placement count:2 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: 
from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: Checking dashboard <-> RGW credentials 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:31.791 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:31 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: Saving service rgw.myrealm1.myzone1 spec with placement count:2 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: Checking dashboard <-> RGW credentials 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.912 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:31.912 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:31 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: Saving service rgw.myrealm1.myzone1 spec with placement count:2 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: Checking dashboard <-> RGW credentials 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:20:31.968 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:31 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a' 2026-03-07T10:20:33.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:32 vm03 ceph-mon[50786]: Checking dashboard <-> RGW credentials 2026-03-07T10:20:33.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:32 vm03 ceph-mon[50786]: pgmap v19: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 4.2 KiB/s 
wr, 18 op/s
2026-03-07T10:20:33.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:32 vm03 ceph-mon[50786]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:33.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:32 vm06 ceph-mon[56197]: Checking dashboard <-> RGW credentials
2026-03-07T10:20:33.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:32 vm06 ceph-mon[56197]: pgmap v19: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 4.2 KiB/s wr, 18 op/s
2026-03-07T10:20:33.217 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:32 vm06 ceph-mon[56197]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:33.290 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:32 vm08 ceph-mon[55906]: Checking dashboard <-> RGW credentials
2026-03-07T10:20:33.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:32 vm08 ceph-mon[55906]: pgmap v19: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 13 KiB/s rd, 4.2 KiB/s wr, 18 op/s
2026-03-07T10:20:33.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:32 vm08 ceph-mon[55906]: from='mgr.24274 192.168.123.103:0/2484199533' entity='mgr.a'
2026-03-07T10:20:33.636 INFO:teuthology.orchestra.run.vm03.stderr:+ true
2026-03-07T10:20:33.637 INFO:teuthology.orchestra.run.vm03.stderr:++ ceph rgw realm tokens
2026-03-07T10:20:33.637 INFO:teuthology.orchestra.run.vm03.stderr:++ jq -r '.[0].token'
2026-03-07T10:20:34.217 INFO:teuthology.orchestra.run.vm03.stderr:+ TOKEN=ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ==
2026-03-07T10:20:34.217 INFO:teuthology.orchestra.run.vm03.stderr:+ echo ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ==
2026-03-07T10:20:34.217 INFO:teuthology.orchestra.run.vm03.stdout:ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ==
2026-03-07T10:20:34.217 INFO:teuthology.orchestra.run.vm03.stderr:+ '[' ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ== '!=' 'master zone has no endpoint' ']'
2026-03-07T10:20:34.217 INFO:teuthology.orchestra.run.vm03.stderr:+ break
2026-03-07T10:20:34.217 INFO:teuthology.orchestra.run.vm03.stderr:++ ceph rgw realm tokens
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr:+ TOKENS='[
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr: {
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr: "realm": "myrealm1",
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr: "token": "ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ=="
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr: }
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr:]'
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status '.[0].realm == "myrealm1"'
2026-03-07T10:20:34.794 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '[' '{' '"realm":' '"myrealm1",' '"token":' '"ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ=="' '}' ']'
2026-03-07T10:20:34.795 INFO:teuthology.orchestra.run.vm03.stdout:true
2026-03-07T10:20:34.796 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '[' '{' '"realm":' '"myrealm1",' '"token":' '"ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ=="' '}' ']'
2026-03-07T10:20:34.796 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status '.[0].token'
2026-03-07T10:20:34.797 INFO:teuthology.orchestra.run.vm03.stdout:"ewogICAgInJlYWxtX25hbWUiOiAibXlyZWFsbTEiLAogICAgInJlYWxtX2lkIjogImVlNzM2YjA2LWY2MjEtNDNlNS05ODdlLTE1YTljODQ2YjAxZCIsCiAgICAiZW5kcG9pbnQiOiAiaHR0cDovLzE5Mi4xNjguMTIzLjEwNjo1NTAwIiwKICAgICJhY2Nlc3Nfa2V5IjogIjBLOE5UUlVXMEZCRTlTMFZUSjRZIiwKICAgICJzZWNyZXQiOiAib0pMb2xhdXk2NFlRU3UxTGRnOEQzbHQwRXdCbmRuT1drdFJ4QXY2UCIKfQ=="
2026-03-07T10:20:34.799 INFO:teuthology.orchestra.run.vm03.stderr:++ ceph rgw realm tokens
2026-03-07T10:20:34.799 INFO:teuthology.orchestra.run.vm03.stderr:++ base64 --decode
2026-03-07T10:20:34.799 INFO:teuthology.orchestra.run.vm03.stderr:++ jq -r '.[0].token'
2026-03-07T10:20:35.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:34 vm03 ceph-mon[50786]: pgmap v20: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 3.2 KiB/s wr, 14 op/s
2026-03-07T10:20:35.162 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 07 10:20:34 vm03 ceph-mon[50786]: from='client.14940 -' entity='client.admin' cmd=[{"prefix": "rgw realm tokens", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:20:35.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:34 vm06 ceph-mon[56197]: pgmap v20: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 3.2 KiB/s wr, 14 op/s
2026-03-07T10:20:35.218 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:34 vm06 ceph-mon[56197]: from='client.14940 -' entity='client.admin' cmd=[{"prefix": "rgw realm tokens", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:20:35.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:34 vm08 ceph-mon[55906]: pgmap v20: 129 pgs: 129 active+clean; 455 KiB data, 81 MiB used, 60 GiB / 60 GiB avail; 10 KiB/s rd, 3.2 KiB/s wr, 14 op/s
2026-03-07T10:20:35.291 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:34 vm08 ceph-mon[55906]: from='client.14940 -' entity='client.admin' cmd=[{"prefix": "rgw realm tokens", "target": ["mon-mgr", ""]}]: dispatch
2026-03-07T10:20:35.376 INFO:teuthology.orchestra.run.vm03.stderr:+ TOKEN_JSON='{
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr: "realm_name": "myrealm1",
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr: "realm_id": "ee736b06-f621-43e5-987e-15a9c846b01d",
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr: "endpoint": "http://192.168.123.106:5500",
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr: "access_key": "0K8NTRUW0FBE9S0VTJ4Y",
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr: "secret": "oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr:}'
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status '.realm_name == "myrealm1"'
2026-03-07T10:20:35.377 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '{' '"realm_name":' '"myrealm1",' '"realm_id":' '"ee736b06-f621-43e5-987e-15a9c846b01d",' '"endpoint":' '"http://192.168.123.106:5500",' '"access_key":' '"0K8NTRUW0FBE9S0VTJ4Y",' '"secret":' '"oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"' '}'
2026-03-07T10:20:35.378 INFO:teuthology.orchestra.run.vm03.stdout:true
2026-03-07T10:20:35.378 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '{' '"realm_name":' '"myrealm1",' '"realm_id":' '"ee736b06-f621-43e5-987e-15a9c846b01d",' '"endpoint":' '"http://192.168.123.106:5500",' '"access_key":' '"0K8NTRUW0FBE9S0VTJ4Y",' '"secret":' '"oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"' '}'
2026-03-07T10:20:35.378 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status '.endpoint | test("http://.+:\\d+")'
2026-03-07T10:20:35.380 INFO:teuthology.orchestra.run.vm03.stdout:true
2026-03-07T10:20:35.380 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '{' '"realm_name":' '"myrealm1",' '"realm_id":' '"ee736b06-f621-43e5-987e-15a9c846b01d",' '"endpoint":' '"http://192.168.123.106:5500",' '"access_key":' '"0K8NTRUW0FBE9S0VTJ4Y",' '"secret":' '"oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"' '}'
2026-03-07T10:20:35.380 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status '.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")'
2026-03-07T10:20:35.382 INFO:teuthology.orchestra.run.vm03.stdout:true
2026-03-07T10:20:35.383 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '{' '"realm_name":' '"myrealm1",' '"realm_id":' '"ee736b06-f621-43e5-987e-15a9c846b01d",' '"endpoint":' '"http://192.168.123.106:5500",' '"access_key":' '"0K8NTRUW0FBE9S0VTJ4Y",' '"secret":' '"oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"' '}'
2026-03-07T10:20:35.383 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status .access_key
2026-03-07T10:20:35.384 INFO:teuthology.orchestra.run.vm03.stdout:"0K8NTRUW0FBE9S0VTJ4Y"
2026-03-07T10:20:35.384 INFO:teuthology.orchestra.run.vm03.stderr:+ echo '{' '"realm_name":' '"myrealm1",' '"realm_id":' '"ee736b06-f621-43e5-987e-15a9c846b01d",' '"endpoint":' '"http://192.168.123.106:5500",' '"access_key":' '"0K8NTRUW0FBE9S0VTJ4Y",' '"secret":' '"oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"' '}'
2026-03-07T10:20:35.385 INFO:teuthology.orchestra.run.vm03.stderr:+ jq --exit-status .secret
2026-03-07T10:20:35.386 INFO:teuthology.orchestra.run.vm03.stdout:"oJLolauy64YQSu1Ldg8D3lt0EwBndnOWktRxAv6P"
2026-03-07T10:20:35.455 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-07T10:20:35.458 INFO:tasks.cephadm:Teardown begin
2026-03-07T10:20:35.458 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf
/etc/ceph/ceph.client.admin.keyring
2026-03-07T10:20:35.488 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:20:35.516 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:20:35.545 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-03-07T10:20:35.546 DEBUG:teuthology.orchestra.run.vm03:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:20:35.564 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:20:35.581 DEBUG:teuthology.orchestra.run.vm08:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-07T10:20:35.601 INFO:tasks.cephadm:Stopping all daemons...
2026-03-07T10:20:35.601 INFO:tasks.cephadm.mon.a:Stopping mon.a...
2026-03-07T10:20:35.601 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a
2026-03-07T10:20:35.836 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.a.service'
2026-03-07T10:20:35.904 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:20:35.904 INFO:tasks.cephadm.mon.a:Stopped mon.a
2026-03-07T10:20:35.904 INFO:tasks.cephadm.mon.c:Stopping mon.b...
2026-03-07T10:20:35.905 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.b
2026-03-07T10:20:36.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:35 vm06 systemd[1]: Stopping Ceph mon.b for d33fdf60-1a0e-11f1-a719-83e365122cb4...
2026-03-07T10:20:36.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:36 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-b[56193]: 2026-03-07T10:20:36.022+0000 7f77fec3b640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:20:36.179 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:36 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-b[56193]: 2026-03-07T10:20:36.022+0000 7f77fec3b640 -1 mon.b@2(peon) e3 *** Got Signal Terminated ***
2026-03-07T10:20:36.180 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:36 vm06 podman[65316]: 2026-03-07 10:20:36.045588009 +0000 UTC m=+0.038359437 container died 502b7928225e9be25e0db42b6f458233e6fc958a2d1f5fea8a179d7646e4cb7d (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-b, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-07T10:20:36.180 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:36 vm06 podman[65316]: 2026-03-07 10:20:36.119633518 +0000 UTC m=+0.112404946 container remove 502b7928225e9be25e0db42b6f458233e6fc958a2d1f5fea8a179d7646e4cb7d (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-b, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git)
2026-03-07T10:20:36.180 INFO:journalctl@ceph.mon.b.vm06.stdout:Mar 07 10:20:36 vm06 bash[65316]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-b
2026-03-07T10:20:36.184 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.b.service'
2026-03-07T10:20:36.226 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:20:36.226 INFO:tasks.cephadm.mon.c:Stopped mon.b
2026-03-07T10:20:36.226 INFO:tasks.cephadm.mon.c:Stopping mon.c...
2026-03-07T10:20:36.226 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.c
2026-03-07T10:20:36.479 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.c.service'
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 systemd[1]: Stopping Ceph mon.c for d33fdf60-1a0e-11f1-a719-83e365122cb4...
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-c[55899]: 2026-03-07T10:20:36.334+0000 7ffa6abf0640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-c[55899]: 2026-03-07T10:20:36.334+0000 7ffa6abf0640 -1 mon.c@1(peon) e3 *** Got Signal Terminated ***
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 podman[65038]: 2026-03-07 10:20:36.390744708 +0000 UTC m=+0.074509368 container died c1d63ff7451762648134915b20284e2758923daa3f36e7cfea7ee4537e05f570 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-c, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 podman[65038]: 2026-03-07 10:20:36.413957362 +0000 UTC m=+0.097722022 container remove c1d63ff7451762648134915b20284e2758923daa3f36e7cfea7ee4537e05f570 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-c, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 bash[65038]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mon-c
2026-03-07T10:20:36.501 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 systemd[1]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.c.service: Deactivated successfully.
2026-03-07T10:20:36.502 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 systemd[1]: Stopped Ceph mon.c for d33fdf60-1a0e-11f1-a719-83e365122cb4.
2026-03-07T10:20:36.502 INFO:journalctl@ceph.mon.c.vm08.stdout:Mar 07 10:20:36 vm08 systemd[1]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mon.c.service: Consumed 1.561s CPU time.
2026-03-07T10:20:36.517 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:20:36.517 INFO:tasks.cephadm.mon.c:Stopped mon.c
2026-03-07T10:20:36.517 INFO:tasks.cephadm.mgr.a:Stopping mgr.a...
2026-03-07T10:20:36.517 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a
2026-03-07T10:20:36.773 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service'
2026-03-07T10:20:36.798 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 systemd[1]: Stopping Ceph mgr.a for d33fdf60-1a0e-11f1-a719-83e365122cb4...
2026-03-07T10:20:36.798 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 podman[73130]: 2026-03-07 10:20:36.670273475 +0000 UTC m=+0.062824298 container died bfab8599b3b1042a1da31a3c63896755af8f238bf4ff70277c73a35faf3d7f49 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:20:36.799 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 podman[73130]: 2026-03-07 10:20:36.712491405 +0000 UTC m=+0.105042228 container remove bfab8599b3b1042a1da31a3c63896755af8f238bf4ff70277c73a35faf3d7f49 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-07T10:20:36.799 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 bash[73130]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-mgr-a
2026-03-07T10:20:36.799 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 systemd[1]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service: Deactivated successfully.
2026-03-07T10:20:36.799 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 systemd[1]: Stopped Ceph mgr.a for d33fdf60-1a0e-11f1-a719-83e365122cb4.
2026-03-07T10:20:36.799 INFO:journalctl@ceph.mgr.a.vm03.stdout:Mar 07 10:20:36 vm03 systemd[1]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.a.service: Consumed 51.295s CPU time.
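Before the teardown above, the workunit's assertions all passed: the `+ TOKEN=` / `+ TOKEN_JSON=` trace shows that `ceph rgw realm tokens` returns a list of realms, each carrying a base64 token that decodes to pretty-printed JSON with realm_name, realm_id, endpoint, access_key, and secret. The same decode-and-check can be expressed as a single jq program instead of the trace's separate calls (a sketch, not the workunit's actual script; "myrealm1" is the realm this job created):

# decode the first realm token and verify the fields asserted in the trace above (sketch)
TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token')
echo "$TOKEN" | base64 --decode | jq --exit-status '
    .realm_name == "myrealm1"
    and (.endpoint | test("http://.+:\\d+"))
    and (.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"))
    and .access_key != null
    and .secret != null'

With --exit-status, jq exits non-zero if the combined expression is false, so the one-liner fails exactly when any of the trace's individual checks would have.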
2026-03-07T10:20:36.811 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:20:36.811 INFO:tasks.cephadm.mgr.a:Stopped mgr.a 2026-03-07T10:20:36.811 INFO:tasks.cephadm.mgr.b:Stopping mgr.b... 2026-03-07T10:20:36.811 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.b 2026-03-07T10:20:36.968 INFO:journalctl@ceph.mgr.b.vm06.stdout:Mar 07 10:20:36 vm06 systemd[1]: Stopping Ceph mgr.b for d33fdf60-1a0e-11f1-a719-83e365122cb4... 2026-03-07T10:20:37.076 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@mgr.b.service' 2026-03-07T10:20:37.110 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:20:37.111 INFO:tasks.cephadm.mgr.b:Stopped mgr.b 2026-03-07T10:20:37.111 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-07T10:20:37.111 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.0 2026-03-07T10:20:37.412 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:37 vm03 systemd[1]: Stopping Ceph osd.0 for d33fdf60-1a0e-11f1-a719-83e365122cb4... 2026-03-07T10:20:37.412 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:37 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0[62416]: 2026-03-07T10:20:37.211+0000 7fdc8b40c640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:20:37.412 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:37 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0[62416]: 2026-03-07T10:20:37.211+0000 7fdc8b40c640 -1 osd.0 29 *** Got signal Terminated *** 2026-03-07T10:20:37.412 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:37 vm03 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0[62416]: 2026-03-07T10:20:37.211+0000 7fdc8b40c640 -1 osd.0 29 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73233]: 2026-03-07 10:20:42.239337486 +0000 UTC m=+5.041957853 container died 915d34b6a0665e7bb63a1b8fc9c2ff17fd10a84bbc6e9675bfb85305a1abf365 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73233]: 2026-03-07 10:20:42.266552819 +0000 UTC m=+5.069173186 container remove 915d34b6a0665e7bb63a1b8fc9c2ff17fd10a84bbc6e9675bfb85305a1abf365 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:20:42.507 
INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 bash[73233]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73299]: 2026-03-07 10:20:42.413653318 +0000 UTC m=+0.019112945 container create 85089bd09c5de174eaea64ce2fd1fd71a1de858a1c2e0cb026ba9c63c06fde1e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73299]: 2026-03-07 10:20:42.452792462 +0000 UTC m=+0.058252089 container init 85089bd09c5de174eaea64ce2fd1fd71a1de858a1c2e0cb026ba9c63c06fde1e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73299]: 2026-03-07 10:20:42.456476667 +0000 UTC m=+0.061936294 container start 85089bd09c5de174eaea64ce2fd1fd71a1de858a1c2e0cb026ba9c63c06fde1e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73299]: 2026-03-07 10:20:42.457562658 +0000 UTC m=+0.063022285 container attach 85089bd09c5de174eaea64ce2fd1fd71a1de858a1c2e0cb026ba9c63c06fde1e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:20:42.507 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73299]: 2026-03-07 10:20:42.406718872 +0000 UTC m=+0.012178499 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:20:42.877 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 07 10:20:42 vm03 podman[73299]: 2026-03-07 
10:20:42.874996091 +0000 UTC m=+0.480455718 container died 85089bd09c5de174eaea64ce2fd1fd71a1de858a1c2e0cb026ba9c63c06fde1e (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-0-deactivate, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:20:42.912 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.0.service' 2026-03-07T10:20:42.950 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:20:42.950 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-07T10:20:42.950 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-07T10:20:42.950 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.1 2026-03-07T10:20:43.467 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:42 vm06 systemd[1]: Stopping Ceph osd.1 for d33fdf60-1a0e-11f1-a719-83e365122cb4... 2026-03-07T10:20:43.468 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:43 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1[59855]: 2026-03-07T10:20:43.048+0000 7fdcd6cd2640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:20:43.468 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:43 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1[59855]: 2026-03-07T10:20:43.048+0000 7fdcd6cd2640 -1 osd.1 29 *** Got signal Terminated *** 2026-03-07T10:20:43.468 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:43 vm06 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1[59855]: 2026-03-07T10:20:43.048+0000 7fdcd6cd2640 -1 osd.1 29 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65521]: 2026-03-07 10:20:48.080668573 +0000 UTC m=+5.048557746 container died 78f3b953305f6e7b569991ed4c8c23dd114441933edc33add9969c6c67044a74 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65521]: 2026-03-07 10:20:48.102212916 +0000 UTC m=+5.070102089 container remove 78f3b953305f6e7b569991ed4c8c23dd114441933edc33add9969c6c67044a74 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, 
CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 bash[65521]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65591]: 2026-03-07 10:20:48.242946034 +0000 UTC m=+0.017082314 container create b2acd385f4ca10229be338584e5c4811c3989225e5e242d0b902534c95d627e9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65591]: 2026-03-07 10:20:48.28202762 +0000 UTC m=+0.056163900 container init b2acd385f4ca10229be338584e5c4811c3989225e5e242d0b902534c95d627e9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65591]: 2026-03-07 10:20:48.285658345 +0000 UTC m=+0.059794625 container start b2acd385f4ca10229be338584e5c4811c3989225e5e242d0b902534c95d627e9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:20:48.337 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65591]: 2026-03-07 10:20:48.288647558 +0000 UTC m=+0.062783838 container attach b2acd385f4ca10229be338584e5c4811c3989225e5e242d0b902534c95d627e9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:20:48.680 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65591]: 2026-03-07 10:20:48.236366263 +0000 UTC m=+0.010502554 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 
harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:20:48.680 INFO:journalctl@ceph.osd.1.vm06.stdout:Mar 07 10:20:48 vm06 podman[65591]: 2026-03-07 10:20:48.678138442 +0000 UTC m=+0.452274711 container died b2acd385f4ca10229be338584e5c4811c3989225e5e242d0b902534c95d627e9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-1-deactivate, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
2026-03-07T10:20:48.760 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.1.service'
2026-03-07T10:20:48.805 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:20:48.805 INFO:tasks.cephadm.osd.1:Stopped osd.1
2026-03-07T10:20:48.805 INFO:tasks.cephadm.osd.2:Stopping osd.2...
2026-03-07T10:20:48.805 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.2
2026-03-07T10:20:49.291 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:48 vm08 systemd[1]: Stopping Ceph osd.2 for d33fdf60-1a0e-11f1-a719-83e365122cb4...
2026-03-07T10:20:49.291 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:48 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2[59333]: 2026-03-07T10:20:48.949+0000 7ffa88304640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0
2026-03-07T10:20:49.291 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:48 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2[59333]: 2026-03-07T10:20:48.949+0000 7ffa88304640 -1 osd.2 29 *** Got signal Terminated ***
2026-03-07T10:20:49.291 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:48 vm08 ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2[59333]: 2026-03-07T10:20:48.949+0000 7ffa88304640 -1 osd.2 29 *** Immediate shutdown (osd_fast_shutdown=true) ***
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:53 vm08 podman[65143]: 2026-03-07 10:20:53.978591852 +0000 UTC m=+5.081016386 container died bfc3f165714aefcf273e43f3832932070088b7130a8a3a5b071d6a22ff072819 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:53 vm08 podman[65143]: 2026-03-07 10:20:53.995130049 +0000 UTC m=+5.097554583 container remove bfc3f165714aefcf273e43f3832932070088b7130a8a3a5b071d6a22ff072819 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9)
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:53 vm08 bash[65143]: ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:54 vm08 podman[65209]: 2026-03-07 10:20:54.145299174 +0000 UTC m=+0.017588233 container create 6946ceb4bc7c6b5ad12cc1f3f39a956ee96cb9f14a58d59d7e6c779ea8a8d4a1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2-deactivate, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:54 vm08 podman[65209]: 2026-03-07 10:20:54.18539291 +0000 UTC m=+0.057681979 container init 6946ceb4bc7c6b5ad12cc1f3f39a956ee96cb9f14a58d59d7e6c779ea8a8d4a1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True)
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:54 vm08 podman[65209]: 2026-03-07 10:20:54.188779167 +0000 UTC m=+0.061068226 container start 6946ceb4bc7c6b5ad12cc1f3f39a956ee96cb9f14a58d59d7e6c779ea8a8d4a1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9)
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:54 vm08 podman[65209]: 2026-03-07 10:20:54.190620713 +0000 UTC m=+0.062909772 container attach 6946ceb4bc7c6b5ad12cc1f3f39a956ee96cb9f14a58d59d7e6c779ea8a8d4a1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4-osd-2-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default)
2026-03-07T10:20:54.240 INFO:journalctl@ceph.osd.2.vm08.stdout:Mar 07 10:20:54 vm08 podman[65209]: 2026-03-07 10:20:54.138805122 +0000 UTC m=+0.011094181 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-07T10:20:54.643 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-d33fdf60-1a0e-11f1-a719-83e365122cb4@osd.2.service'
2026-03-07T10:20:54.682 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-07T10:20:54.683 INFO:tasks.cephadm.osd.2:Stopped osd.2
2026-03-07T10:20:54.683 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --force --keep-logs
2026-03-07T10:20:54.812 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:20:55.744 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --force --keep-logs
2026-03-07T10:20:55.871 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:21:07.219 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --force --keep-logs
2026-03-07T10:21:07.352 INFO:teuthology.orchestra.run.vm08.stdout:Deleting cluster with fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:21:18.565 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:21:18.591 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:21:18.626 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-07T10:21:18.657 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-07T10:21:18.657 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm03/crash
2026-03-07T10:21:18.657 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash -- .
2026-03-07T10:21:18.680 INFO:teuthology.orchestra.run.vm03.stderr:tar: /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash: Cannot open: No such file or directory
2026-03-07T10:21:18.680 INFO:teuthology.orchestra.run.vm03.stderr:tar: Error is not recoverable: exiting now
2026-03-07T10:21:18.681 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm06/crash
2026-03-07T10:21:18.681 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash -- .
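For reference, the teardown sequence above can be replayed by hand. A minimal sketch for a single host, assuming the fsid and cephadm binary path seen in this run (both are placeholders for any other cluster):

    #!/usr/bin/env bash
    # Sketch: cephadm test teardown, first pass.
    set -euo pipefail
    FSID=d33fdf60-1a0e-11f1-a719-83e365122cb4    # fsid from this run; placeholder elsewhere
    CEPHADM=/home/ubuntu/cephtest/cephadm        # path used by teuthology; placeholder elsewhere
    # cephadm names daemon units ceph-<fsid>@<daemon>; stop the OSD cleanly first.
    sudo systemctl stop "ceph-${FSID}@osd.2"
    # First removal pass keeps /var/log/ceph in place so the logs can be archived.
    sudo "$CEPHADM" rm-cluster --fsid "$FSID" --force --keep-logs
    # Drop the client config and admin keyring that bootstrap left on the host.
    sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring

A second rm-cluster pass without --keep-logs, visible further down after log archiving, then removes the log directory as well.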
2026-03-07T10:21:18.712 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash: Cannot open: No such file or directory
2026-03-07T10:21:18.712 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now
2026-03-07T10:21:18.714 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm08/crash
2026-03-07T10:21:18.714 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash -- .
2026-03-07T10:21:18.742 INFO:teuthology.orchestra.run.vm08.stderr:tar: /var/lib/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/crash: Cannot open: No such file or directory
2026-03-07T10:21:18.742 INFO:teuthology.orchestra.run.vm08.stderr:tar: Error is not recoverable: exiting now
2026-03-07T10:21:18.743 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-07T10:21:18.743 DEBUG:teuthology.orchestra.run.vm03:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v MON_DOWN | egrep -v 'mons down' | egrep -v 'mon down' | egrep -v 'out of quorum' | egrep -v CEPHADM_STRAY_DAEMON | head -n 1
2026-03-07T10:21:18.775 INFO:tasks.cephadm:Compressing logs...
2026-03-07T10:21:18.775 DEBUG:teuthology.orchestra.run.vm03:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:21:18.818 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:21:18.819 DEBUG:teuthology.orchestra.run.vm08:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:21:18.845 INFO:teuthology.orchestra.run.vm03.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-07T10:21:18.846 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-07T10:21:18.847 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.a.log
2026-03-07T10:21:18.848 INFO:teuthology.orchestra.run.vm06.stderr:find: ‘/var/log/rbd-target-api’gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-07T10:21:18.848 INFO:teuthology.orchestra.run.vm06.stderr:: No such file or directory
2026-03-07T10:21:18.848 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/cephadm.log: 90.1%gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log
2026-03-07T10:21:18.848 INFO:teuthology.orchestra.run.vm03.stderr: -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-07T10:21:18.849 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log
2026-03-07T10:21:18.849 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.a.log: /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log: 83.7% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log.gz
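The "Checking cluster log for badness" command above is the job's log-ignorelist and log-only-match settings turned into a grep pipeline; the run fails if anything survives the filters. A condensed sketch of the same check, using a single grep -Ev in place of the chained egrep -v calls, with the patterns copied from this job's config:

    #!/usr/bin/env bash
    # Sketch: fail if the cluster log holds an unexpected CEPHADM_ health message.
    LOG=/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log   # fsid from this run
    BAD=$(grep -E '\[ERR\]|\[WRN\]|\[SEC\]' "$LOG" \
      | grep -E 'CEPHADM_' \
      | grep -Ev '\(MDS_ALL_DOWN\)|\(MDS_UP_LESS_THAN_MAX\)|MON_DOWN|mons down|mon down|out of quorum|CEPHADM_STRAY_DAEMON' \
      | head -n 1)
    # Empty output means a clean run; any surviving line is reported as badness.
    [ -z "$BAD" ] || { echo "badness: $BAD"; exit 1; }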
2026-03-07T10:21:18.850 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log
2026-03-07T10:21:18.850 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mgr.a.log
2026-03-07T10:21:18.850 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/cephadm.log: 88.0% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-07T10:21:18.851 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.b.log
2026-03-07T10:21:18.851 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log: 89.4% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log.gz
2026-03-07T10:21:18.851 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log
2026-03-07T10:21:18.852 INFO:teuthology.orchestra.run.vm08.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-07T10:21:18.852 INFO:teuthology.orchestra.run.vm08.stderr:‘/var/log/rbd-target-api’: No such file or directory
2026-03-07T10:21:18.853 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log
2026-03-07T10:21:18.853 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log
2026-03-07T10:21:18.853 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/cephadm.log: 88.3% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-07T10:21:18.854 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.c.log
2026-03-07T10:21:18.855 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log
2026-03-07T10:21:18.856 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.b.log: 93.2% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log.gz
2026-03-07T10:21:18.856 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log
2026-03-07T10:21:18.857 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mgr.a.log: gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log
2026-03-07T10:21:18.857 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log: 79.1% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log.gz
2026-03-07T10:21:18.857 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log
2026-03-07T10:21:18.858 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.c.log: 93.2% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log.gz
2026-03-07T10:21:18.858 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log: 89.5% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log.gz
2026-03-07T10:21:18.858 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log
2026-03-07T10:21:18.858 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mgr.b.log
2026-03-07T10:21:18.859 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log: 79.1% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log.gz
2026-03-07T10:21:18.859 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log
2026-03-07T10:21:18.859 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log: 82.7% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log.gz
2026-03-07T10:21:18.859 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.1.log
2026-03-07T10:21:18.859 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log: 81.9% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.cephadm.log.gz
2026-03-07T10:21:18.860 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log: 89.5% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.audit.log.gz
2026-03-07T10:21:18.860 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.2.log
2026-03-07T10:21:18.860 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log: 82.7% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph.log.gz
2026-03-07T10:21:18.860 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-client.rgw.myrealm1.myzone1.vm08.rkfwlo.log
2026-03-07T10:21:18.863 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mgr.b.log: 89.9% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mgr.b.log.gz
2026-03-07T10:21:18.864 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-client.rgw.myrealm1.myzone1.vm06.kgepvx.log
2026-03-07T10:21:18.864 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.0.log
2026-03-07T10:21:18.867 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.2.log: /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-client.rgw.myrealm1.myzone1.vm08.rkfwlo.log: 63.2% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-client.rgw.myrealm1.myzone1.vm08.rkfwlo.log.gz
2026-03-07T10:21:18.873 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.1.log: /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-client.rgw.myrealm1.myzone1.vm06.kgepvx.log: 63.0% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-client.rgw.myrealm1.myzone1.vm06.kgepvx.log.gz
2026-03-07T10:21:18.879 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log: /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.0.log: 93.2% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-volume.log.gz
2026-03-07T10:21:18.885 INFO:teuthology.orchestra.run.vm08.stderr: 92.4% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.c.log.gz
2026-03-07T10:21:18.886 INFO:teuthology.orchestra.run.vm06.stderr: 92.5% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.b.log.gz
2026-03-07T10:21:18.926 INFO:teuthology.orchestra.run.vm03.stderr: 89.7% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mgr.a.log.gz
2026-03-07T10:21:18.941 INFO:teuthology.orchestra.run.vm03.stderr: 92.0% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-mon.a.log.gz
2026-03-07T10:21:19.055 INFO:teuthology.orchestra.run.vm08.stderr: 94.5% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.2.log.gz
2026-03-07T10:21:19.057 INFO:teuthology.orchestra.run.vm08.stderr:
2026-03-07T10:21:19.057 INFO:teuthology.orchestra.run.vm08.stderr:real 0m0.218s
2026-03-07T10:21:19.057 INFO:teuthology.orchestra.run.vm08.stderr:user 0m0.227s
2026-03-07T10:21:19.057 INFO:teuthology.orchestra.run.vm08.stderr:sys 0m0.028s
2026-03-07T10:21:19.071 INFO:teuthology.orchestra.run.vm06.stderr: 94.4% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.1.log.gz
2026-03-07T10:21:19.074 INFO:teuthology.orchestra.run.vm06.stderr:
2026-03-07T10:21:19.074 INFO:teuthology.orchestra.run.vm06.stderr:real 0m0.239s
2026-03-07T10:21:19.074 INFO:teuthology.orchestra.run.vm06.stderr:user 0m0.242s
2026-03-07T10:21:19.074 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.037s
2026-03-07T10:21:19.089 INFO:teuthology.orchestra.run.vm03.stderr: 94.5% -- replaced with /var/log/ceph/d33fdf60-1a0e-11f1-a719-83e365122cb4/ceph-osd.0.log.gz
2026-03-07T10:21:19.090 INFO:teuthology.orchestra.run.vm03.stderr:
2026-03-07T10:21:19.090 INFO:teuthology.orchestra.run.vm03.stderr:real 0m0.257s
2026-03-07T10:21:19.091 INFO:teuthology.orchestra.run.vm03.stderr:user 0m0.320s
2026-03-07T10:21:19.091 INFO:teuthology.orchestra.run.vm03.stderr:sys 0m0.036s
2026-03-07T10:21:19.091 INFO:tasks.cephadm:Archiving logs...
2026-03-07T10:21:19.091 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm03/log
2026-03-07T10:21:19.091 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-07T10:21:19.176 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm06/log
2026-03-07T10:21:19.176 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-07T10:21:19.218 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm08/log
2026-03-07T10:21:19.218 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-07T10:21:19.258 INFO:tasks.cephadm:Removing cluster...
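The interleaved gzip chatter above comes from running one gzip per log file with unbounded parallelism on all three hosts at once; the compressed logs are then streamed home as a tar written to stdout. A minimal sketch of both steps for one host (the destination file is a placeholder; teuthology pipes the tar stream over SSH instead):

    #!/usr/bin/env bash
    # Sketch: compress per-file in parallel, then stream the log tree as a tarball.
    # --max-procs=0 means no limit on concurrent gzip processes, which is why the
    # per-file progress lines from the three hosts interleave in the output above.
    sudo find /var/log/ceph -name '*.log' -print0 \
      | sudo xargs -0 --max-args=1 --max-procs=0 --no-run-if-empty --verbose -- gzip -5 --verbose --
    # Stream the directory contents to stdout; the local redirect stands in for
    # the SSH transfer teuthology performs to the archive host.
    sudo tar c -f - -C /var/log/ceph -- . > /tmp/ceph-logs.tar   # placeholder destination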
2026-03-07T10:21:19.258 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --force
2026-03-07T10:21:19.419 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:21:19.532 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --force
2026-03-07T10:21:19.680 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:21:19.791 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid d33fdf60-1a0e-11f1-a719-83e365122cb4 --force
2026-03-07T10:21:19.929 INFO:teuthology.orchestra.run.vm08.stdout:Deleting cluster with fsid: d33fdf60-1a0e-11f1-a719-83e365122cb4
2026-03-07T10:21:20.057 INFO:tasks.cephadm:Removing cephadm ...
2026-03-07T10:21:20.057 DEBUG:teuthology.orchestra.run.vm03:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-07T10:21:20.072 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-07T10:21:20.091 DEBUG:teuthology.orchestra.run.vm08:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-07T10:21:20.108 INFO:tasks.cephadm:Teardown complete
2026-03-07T10:21:20.108 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-07T10:21:20.110 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-07T10:21:20.110 DEBUG:teuthology.orchestra.run.vm03:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:21:20.115 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:21:20.129 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:21:20.132 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:21:20.133 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-07T10:21:20.133 INFO:teuthology.orchestra.run.vm03.stdout:^+ netcup02.theravenhub.com 2 6 177 44 +321us[ +336us] +/- 18ms
2026-03-07T10:21:20.133 INFO:teuthology.orchestra.run.vm03.stdout:^+ 139-162-156-95.ip.linode> 2 6 177 44 +2349us[+2364us] +/- 30ms
2026-03-07T10:21:20.133 INFO:teuthology.orchestra.run.vm03.stdout:^* srv01.spectre-net.de 2 6 177 42 -2091us[-2076us] +/- 15ms
2026-03-07T10:21:20.133 INFO:teuthology.orchestra.run.vm03.stdout:^+ srv01-nc.securepod.org 2 6 177 43 +808us[ +822us] +/- 19ms
2026-03-07T10:21:20.133 DEBUG:teuthology.orchestra.run.vm08:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-07T10:21:20.149 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:21:20.153 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:21:20.153 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-07T10:21:20.153 INFO:teuthology.orchestra.run.vm06.stdout:^* srv01.spectre-net.de 2 6 177 42 -2125us[-2159us] +/- 15ms
2026-03-07T10:21:20.153 INFO:teuthology.orchestra.run.vm06.stdout:^+ srv01-nc.securepod.org 2 6 177 42 +1002us[+1002us] +/- 19ms
2026-03-07T10:21:20.153 INFO:teuthology.orchestra.run.vm06.stdout:^+ netcup02.theravenhub.com 2 6 177 43 +351us[ +317us] +/- 18ms
2026-03-07T10:21:20.154 INFO:teuthology.orchestra.run.vm06.stdout:^+ 139-162-156-95.ip.linode> 2 6 177 44 +2198us[+2165us] +/- 30ms
2026-03-07T10:21:20.166 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-07T10:21:20.170 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-07T10:21:20.170 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-07T10:21:20.170 INFO:teuthology.orchestra.run.vm08.stdout:^+ srv01-nc.securepod.org 2 6 177 43 +849us[ +849us] +/- 19ms
2026-03-07T10:21:20.170 INFO:teuthology.orchestra.run.vm08.stdout:^* srv01.spectre-net.de 2 6 177 43 -2101us[-2070us] +/- 15ms
2026-03-07T10:21:20.170 INFO:teuthology.orchestra.run.vm08.stdout:^+ netcup02.theravenhub.com 2 6 177 43 +354us[ +385us] +/- 18ms
2026-03-07T10:21:20.170 INFO:teuthology.orchestra.run.vm08.stdout:^+ 139-162-156-95.ip.linode> 2 6 177 43 +2198us[+2229us] +/- 30ms
2026-03-07T10:21:20.170 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-07T10:21:20.173 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-07T10:21:20.173 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-07T10:21:20.175 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-07T10:21:20.177 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-07T10:21:20.179 INFO:teuthology.task.internal:Duration was 392.862330 seconds
2026-03-07T10:21:20.179 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-07T10:21:20.181 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-07T10:21:20.181 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-07T10:21:20.183 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-07T10:21:20.197 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-07T10:21:20.230 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:21:20.243 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:21:20.264 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-07T10:21:20.700 INFO:teuthology.task.internal.syslog:Checking logs for errors...
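The final clock-skew probe above tries ntpq and falls back to chronyc, never failing the job outright; on these CentOS 9 hosts only chrony is installed, hence the "ntpq: command not found" followed by a chronyc table. The same one-liner:

    #!/usr/bin/env bash
    # Sketch: clock sanity probe; `|| true` keeps teardown alive if neither tool exists.
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true

In the chronyc tables above, ^* marks the source the host is currently synchronised to and ^+ marks sources judged good enough to combine with it, so all three hosts show a healthy selection.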
2026-03-07T10:21:20.700 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm03.local
2026-03-07T10:21:20.700 DEBUG:teuthology.orchestra.run.vm03:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-07T10:21:20.724 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-07T10:21:20.724 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-07T10:21:20.754 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm08.local
2026-03-07T10:21:20.754 DEBUG:teuthology.orchestra.run.vm08:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-07T10:21:20.781 INFO:teuthology.task.internal.syslog:Gathering journalctl...
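Each per-host check above is one long pipeline: flag kernel BUG/INFO/DEADLOCK lines, then strip patterns known to be benign. A shortened sketch keeping only a few of the filters (the real command carries the full list shown above):

    #!/usr/bin/env bash
    # Sketch: scan the captured kernel log for suspicious lines; first hit wins.
    KERN=/home/ubuntu/cephtest/archive/syslog/kern.log   # path used in this run
    grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' "$KERN" \
      | grep -v 'task .* blocked for more than .* seconds' \
      | grep -v CRON \
      | grep -v ceph-crash \
      | head -n 1
    # Non-empty output would be surfaced as a job failure; here all hosts are clean.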
2026-03-07T10:21:20.781 DEBUG:teuthology.orchestra.run.vm03:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:21:20.782 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:21:20.796 DEBUG:teuthology.orchestra.run.vm08:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:21:21.320 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-07T10:21:21.320 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:21:21.322 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:21:21.324 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-07T10:21:21.347 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:21:21.347 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:21:21.347 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:21:21.347 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-07T10:21:21.347 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-07T10:21:21.350 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:21:21.350 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:21:21.351 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-07T10:21:21.351 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-07T10:21:21.351 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-07T10:21:21.354 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-07T10:21:21.355 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-07T10:21:21.355 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log/home/ubuntu/cephtest/archive/syslog/kern.log:
2026-03-07T10:21:21.355 INFO:teuthology.orchestra.run.vm08.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-07T10:21:21.356 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-07T10:21:21.508 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-07T10:21:21.522 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-07T10:21:21.530 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-07T10:21:21.532 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-07T10:21:21.534 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-07T10:21:21.534 DEBUG:teuthology.orchestra.run.vm03:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-07T10:21:21.578 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-07T10:21:21.604 DEBUG:teuthology.orchestra.run.vm08:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-07T10:21:21.629 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-07T10:21:21.632 DEBUG:teuthology.orchestra.run.vm03:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:21:21.633 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:21:21.646 DEBUG:teuthology.orchestra.run.vm08:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:21:21.662 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = core
2026-03-07T10:21:21.669 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-07T10:21:21.699 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = core
2026-03-07T10:21:21.714 DEBUG:teuthology.orchestra.run.vm03:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:21:21.736 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:21:21.736 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:21:21.752 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:21:21.753 DEBUG:teuthology.orchestra.run.vm08:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-07T10:21:21.770 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-07T10:21:21.770 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-07T10:21:21.773 INFO:teuthology.task.internal:Transferring archived files...
2026-03-07T10:21:21.773 DEBUG:teuthology.misc:Transferring archived files from vm03:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm03
2026-03-07T10:21:21.773 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
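The internal.coredump unwind above restores the default core pattern, deletes cores written by systemd-sysusers (noise, not test crashes), and removes the coredump directory only if it ended up empty. The follow-up test -e returning exit status 1 is the good case: the directory is gone, so no daemon dumped core. A sketch of the same cleanup:

    #!/usr/bin/env bash
    # Sketch: coredump teardown; an empty (hence removable) dir means no crashes.
    COREDIR=/home/ubuntu/cephtest/archive/coredump   # path used in this run
    sudo sysctl -w kernel.core_pattern=core
    # Prune cores from systemd-sysusers only; keep anything else for triage.
    sudo find "$COREDIR" -type f -exec sh -c 'file "$1" | grep -q systemd-sysusers && rm "$1"' _ {} \;
    rmdir --ignore-fail-on-non-empty -- "$COREDIR"
    test -e "$COREDIR" && echo "cores kept for inspection" || echo "no cores"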
2026-03-07T10:21:21.807 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm06
2026-03-07T10:21:21.807 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-07T10:21:21.838 DEBUG:teuthology.misc:Transferring archived files from vm08:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/13/remote/vm08
2026-03-07T10:21:21.838 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-07T10:21:21.868 INFO:teuthology.task.internal:Removing archive directory...
2026-03-07T10:21:21.869 DEBUG:teuthology.orchestra.run.vm03:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-07T10:21:21.870 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-07T10:21:21.880 DEBUG:teuthology.orchestra.run.vm08:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-07T10:21:21.926 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-07T10:21:21.928 INFO:teuthology.task.internal:Not uploading archives.
2026-03-07T10:21:21.928 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-07T10:21:21.930 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-07T10:21:21.930 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-07T10:21:21.932 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-07T10:21:21.939 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-07T10:21:21.950 INFO:teuthology.orchestra.run.vm03.stdout: 8532146 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:21 /home/ubuntu/cephtest
2026-03-07T10:21:21.957 INFO:teuthology.orchestra.run.vm06.stdout: 8532146 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:21 /home/ubuntu/cephtest
2026-03-07T10:21:21.985 INFO:teuthology.orchestra.run.vm08.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:21 /home/ubuntu/cephtest
2026-03-07T10:21:21.986 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-07T10:21:21.992 INFO:teuthology.run:Summary data:
description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite}
duration: 392.8623299598694
owner: irq0
success: true
2026-03-07T10:21:21.992 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-07T10:21:22.016 INFO:teuthology.run:pass