2026-03-09T00:18:17.896 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a 2026-03-09T00:18:17.901 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-09T00:18:17.918 INFO:teuthology.run:Config: archive_path: /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310 branch: squid description: orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain} email: null first_in_suite: false flavor: default job_id: '310' last_in_suite: false machine_type: vps name: kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps no_nested_subset: false os_type: centos os_version: 9.stream overrides: admin_socket: branch: squid ansible.cephlab: branch: main skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs vars: timezone: UTC ceph: conf: mgr: debug mgr: 20 debug ms: 1 mon: debug mon: 20 debug ms: 1 debug paxos: 20 osd: debug ms: 1 debug osd: 20 osd mclock iops capacity threshold hdd: 49000 flavor: default log-ignorelist: - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) log-only-match: - CEPHADM_ sha1: e911bdebe5c8faa3800735d1568fcdca65db60df ceph-deploy: conf: client: log file: /var/log/ceph/ceph-$name.$pid.log mon: {} install: ceph: flavor: default sha1: e911bdebe5c8faa3800735d1568fcdca65db60df extra_system_packages: deb: - python3-xmltodict - python3-jmespath rpm: - bzip2 - perl-Test-Harness - python3-xmltodict - python3-jmespath selinux: allowlist: - scontext=system_u:system_r:logrotate_t:s0 workunit: branch: tt-squid sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589 owner: kyr priority: 1000 repo: https://github.com/ceph/ceph.git roles: - - host.a - mon.a - mgr.x - osd.0 - osd.1 - client.0 - - host.b - cephadm.exclude seed: 8017 sha1: e911bdebe5c8faa3800735d1568fcdca65db60df sleep_before_teardown: 0 subset: 1/64 suite: orch:cephadm suite_branch: tt-squid suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa suite_relpath: qa suite_repo: https://github.com/kshtsk/ceph.git suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589 targets: vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNldiBqx7vYIilMF5+CjXsT9qErwmGYh7QM64AvwRpg5shX3V/6VCLHSlgSQNQYsTo+IGuocTMAT7rLVW+UGEMA= vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAcMLnrfJqyaBTgft5WEBCtI0FRnt+mok0dPjdKI7Rj2rIAQVaB8kbWGAdj5gT30Ev/aOaG5KIQhPfwkqYEf+24= tasks: - pexec: all: - sudo dnf remove nvme-cli -y - sudo dnf install runc nvmetcli nvme-cli -y - sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf - sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf - cephadm.deploy_samba_ad_dc: role: host.b - cephadm: null - cephadm.shell: host.a: - ceph fs volume create cephfs - cephadm.wait_for_service: service: mds.cephfs - cephadm.shell: host.a: - cmd: ceph fs subvolumegroup create cephfs g1 - cmd: ceph fs subvolume create cephfs sub1 --group-name=g1 --mode=0777 - cmd: ceph fs authorize cephfs client.smbdata / rw - cmd: ceph osd pool create .smb --yes-i-really-mean-it - cmd: ceph osd pool application enable .smb smb - cmd: rados --pool=.smb --namespace=admem1 put conf.toml /dev/stdin stdin: 'samba-container-config = "v0" [configs.admem1] shares = ["share1"] globals = ["default", "domain"] instance_name = "SAMBA" [shares.share1.options] "vfs objects" = "ceph" path = "/" "ceph:config_file" = "/etc/ceph/ceph.conf" "ceph:user_id" = "smbdata" "kernel share modes" = "no" 
"read only" = "no" "browseable" = "yes" [globals.default.options] "server min protocol" = "SMB2" "load printers" = "no" "printing" = "bsd" "printcap name" = "/dev/null" "disable spoolss" = "yes" "guest ok" = "no" [globals.domain.options] security = "ads" workgroup = "DOMAIN1" realm = "domain1.sink.test" "idmap config * : backend" = "autorid" "idmap config * : range" = "2000-9999999" ' - cmd: ceph config-key set smb/config/admem1/join1.json -i - stdin: '{"username": "Administrator", "password": "Passw0rd"} ' - cephadm.apply: specs: - cluster_id: admem1 config_uri: rados://.smb/admem1/conf.toml custom_dns: - '{{ctx.samba_ad_dc_ip}}' features: - domain include_ceph_users: - client.smbdata join_sources: - rados:mon-config-key:smb/config/admem1/join1.json placement: count: 1 service_id: admem1 service_type: smb - cephadm.wait_for_service: service: smb.admem1 - cephadm.exec: host.b: - sleep 30 - '{{ctx.samba_client_container_cmd|join('' '')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{''host.a''|role_to_remote|attr(''ip_address'')}}/share1 -c ls' teuthology: fragments_dropped: [] meta: {} postmerge: [] teuthology_branch: clyso-debian-13 teuthology_repo: https://github.com/clyso/teuthology teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444 timestamp: 2026-03-08_22:22:45 tube: vps user: kyr verbose: false worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473 2026-03-09T00:18:17.918 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it 2026-03-09T00:18:17.919 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks 2026-03-09T00:18:17.919 INFO:teuthology.run_tasks:Running task internal.check_packages... 2026-03-09T00:18:17.919 INFO:teuthology.task.internal:Checking packages... 2026-03-09T00:18:17.919 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df' 2026-03-09T00:18:17.919 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch 2026-03-09T00:18:17.919 INFO:teuthology.packaging:ref: None 2026-03-09T00:18:17.919 INFO:teuthology.packaging:tag: None 2026-03-09T00:18:17.919 INFO:teuthology.packaging:branch: squid 2026-03-09T00:18:17.919 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:17.919 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid 2026-03-09T00:18:18.712 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb 2026-03-09T00:18:18.713 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep... 2026-03-09T00:18:18.714 INFO:teuthology.task.internal:no buildpackages task found 2026-03-09T00:18:18.714 INFO:teuthology.run_tasks:Running task internal.save_config... 2026-03-09T00:18:18.714 INFO:teuthology.task.internal:Saving configuration 2026-03-09T00:18:18.719 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-03-09T00:18:18.720 INFO:teuthology.task.internal.check_lock:Checking locks... 
2026-03-09T00:18:18.725 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 00:17:04.281126', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNldiBqx7vYIilMF5+CjXsT9qErwmGYh7QM64AvwRpg5shX3V/6VCLHSlgSQNQYsTo+IGuocTMAT7rLVW+UGEMA='} 2026-03-09T00:18:18.732 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 00:17:04.281547', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAcMLnrfJqyaBTgft5WEBCtI0FRnt+mok0dPjdKI7Rj2rIAQVaB8kbWGAdj5gT30Ev/aOaG5KIQhPfwkqYEf+24='} 2026-03-09T00:18:18.732 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-09T00:18:18.732 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0'] 2026-03-09T00:18:18.732 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.b', 'cephadm.exclude'] 2026-03-09T00:18:18.732 INFO:teuthology.run_tasks:Running task console_log... 2026-03-09T00:18:18.738 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding 2026-03-09T00:18:18.742 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding 2026-03-09T00:18:18.743 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fcc228f8af0>, signals=[15]) 2026-03-09T00:18:18.743 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-09T00:18:18.743 INFO:teuthology.task.internal:Opening connections... 2026-03-09T00:18:18.743 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local 2026-03-09T00:18:18.744 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T00:18:18.800 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local 2026-03-09T00:18:18.800 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T00:18:18.856 INFO:teuthology.run_tasks:Running task internal.push_inventory... 
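internal.check_lock compares the lock-server status records above against the running job. The helper below is a hypothetical sketch of that kind of validation (`node_ok_for_job` is an invented name; the real task's logic lives in teuthology and may differ).

```python
# Illustrative sketch (not the actual internal.check_lock implementation):
# the kind of validation implied by the machine-status dicts logged above.
def node_ok_for_job(status: dict, owner: str, run_archive: str) -> bool:
    """Return True if a lock-server status record looks usable for this run."""
    return (
        status.get("up") is True
        and status.get("locked") is True
        and status.get("locked_by") == owner
        and status.get("description") == run_archive
    )

status = {
    "name": "vm03.local",
    "up": True,
    "locked": True,
    "locked_by": "kyr",
    "description": "/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310",
}
assert node_ok_for_job(status, "kyr", status["description"])
```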
2026-03-09T00:18:18.857 DEBUG:teuthology.orchestra.run.vm03:> uname -m 2026-03-09T00:18:18.886 INFO:teuthology.orchestra.run.vm03.stdout:x86_64 2026-03-09T00:18:18.887 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-09T00:18:18.942 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-09T00:18:18.943 INFO:teuthology.lock.ops:Updating vm03.local on lock server 2026-03-09T00:18:18.947 DEBUG:teuthology.orchestra.run.vm06:> uname -m 2026-03-09T00:18:18.964 INFO:teuthology.orchestra.run.vm06.stdout:x86_64 2026-03-09T00:18:18.965 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release 2026-03-09T00:18:19.018 INFO:teuthology.orchestra.run.vm06.stdout:NAME="CentOS Stream" 2026-03-09T00:18:19.018 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="9" 2026-03-09T00:18:19.018 INFO:teuthology.orchestra.run.vm06.stdout:ID="centos" 2026-03-09T00:18:19.018 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE="rhel fedora" 2026-03-09T00:18:19.018 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="9" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:PLATFORM_ID="platform:el9" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:ANSI_COLOR="0;31" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:LOGO="fedora-logo-icon" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://centos.org/" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-09T00:18:19.019 INFO:teuthology.orchestra.run.vm06.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-09T00:18:19.019 INFO:teuthology.lock.ops:Updating vm06.local on lock server 2026-03-09T00:18:19.023 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-09T00:18:19.025 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-09T00:18:19.026 INFO:teuthology.task.internal:Checking for old test directory... 
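internal.push_inventory runs `uname -m` and `cat /etc/os-release` on each node and then updates the lock server, presumably with the arch and OS fields it just gathered. Below is a small sketch of parsing os-release output into such fields; `parse_os_release` is an illustrative helper, not teuthology code, and the real task runs these commands over SSH.

```python
# Turn `cat /etc/os-release` output (as logged above) into a simple dict of
# fields such as ID and VERSION_ID.
def parse_os_release(text: str) -> dict:
    info = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or "=" not in line:
            continue
        key, _, value = line.partition("=")
        info[key] = value.strip('"')
    return info

sample = 'ID="centos"\nVERSION_ID="9"\nPRETTY_NAME="CentOS Stream 9"\n'
osr = parse_os_release(sample)
print(osr["ID"], osr["VERSION_ID"])   # -> centos 9
```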
2026-03-09T00:18:19.026 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest 2026-03-09T00:18:19.028 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest 2026-03-09T00:18:19.072 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-09T00:18:19.073 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-09T00:18:19.073 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph) 2026-03-09T00:18:19.082 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph) 2026-03-09T00:18:19.095 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-09T00:18:19.127 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-09T00:18:19.128 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-09T00:18:19.136 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready 2026-03-09T00:18:19.150 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:18:19.335 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready 2026-03-09T00:18:19.348 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:18:19.515 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-09T00:18:19.517 INFO:teuthology.task.internal:Creating test directory... 2026-03-09T00:18:19.517 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-09T00:18:19.518 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-09T00:18:19.533 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-09T00:18:19.534 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-09T00:18:19.535 INFO:teuthology.task.internal:Creating archive directory... 2026-03-09T00:18:19.535 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-09T00:18:19.573 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-09T00:18:19.591 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-09T00:18:19.592 INFO:teuthology.task.internal:Enabling coredump saving... 
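The preflight tasks above require that the test directory does not exist yet and that /var/lib/ceph is absent or empty (the `ls: cannot access` stderr lines are the harmless "absent" case). A local Python rendering of those two checks, with invented helper names, purely for illustration; the run performs them remotely with `test '!' -e ...` and `test -z $(ls -A ...)`.

```python
# Local illustration of the preflight checks run above over SSH.
import os

def no_stale_test_dir(path: str = "/home/ubuntu/cephtest") -> bool:
    # internal.check_conflict: the per-run test directory must not exist yet.
    return not os.path.exists(path)

def ceph_data_clean(path: str = "/var/lib/ceph") -> bool:
    # internal.check_ceph_data: a missing directory counts as clean, which is
    # why the `ls: cannot access` stderr lines above do not fail the check.
    return not os.path.isdir(path) or not os.listdir(path)
```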
2026-03-09T00:18:19.592 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-09T00:18:19.644 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:18:19.645 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-09T00:18:19.658 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:18:19.658 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-09T00:18:19.687 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-09T00:18:19.712 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T00:18:19.722 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T00:18:19.723 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T00:18:19.734 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-09T00:18:19.735 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-09T00:18:19.736 INFO:teuthology.task.internal:Configuring sudo... 2026-03-09T00:18:19.736 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-09T00:18:19.766 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-09T00:18:19.800 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-09T00:18:19.802 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
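internal.coredump first checks for a container environment, then points kernel.core_pattern at the archive directory and persists the setting through /etc/sysctl.conf. The sketch below rebuilds that shell pipeline from the test directory path; it mirrors the logged command but is not the task's actual code.

```python
# Sketch of how the coredump setup command above is assembled: create the
# archive/coredump directory, set kernel.core_pattern for the running kernel,
# and persist it via /etc/sysctl.conf.
def coredump_cmd(testdir: str = "/home/ubuntu/cephtest") -> str:
    pattern = f"{testdir}/archive/coredump/%t.%p.core"
    return (
        f"install -d -m0755 -- {testdir}/archive/coredump && "
        f"sudo sysctl -w kernel.core_pattern={pattern} && "
        f"echo kernel.core_pattern={pattern} | sudo tee -a /etc/sysctl.conf"
    )

print(coredump_cmd())
```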
2026-03-09T00:18:19.802 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-09T00:18:19.831 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-09T00:18:19.855 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T00:18:19.906 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T00:18:19.963 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:18:19.963 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-09T00:18:20.021 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T00:18:20.044 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T00:18:20.099 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T00:18:20.099 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-09T00:18:20.159 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart 2026-03-09T00:18:20.161 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart 2026-03-09T00:18:20.204 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T00:18:20.241 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T00:18:20.680 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-09T00:18:20.681 INFO:teuthology.task.internal:Starting timer... 2026-03-09T00:18:20.681 INFO:teuthology.run_tasks:Running task pcp... 2026-03-09T00:18:20.748 INFO:teuthology.run_tasks:Running task selinux... 2026-03-09T00:18:20.770 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0']} 2026-03-09T00:18:20.770 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported 2026-03-09T00:18:20.770 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported 2026-03-09T00:18:20.770 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-09T00:18:20.770 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-09T00:18:20.770 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-09T00:18:20.771 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
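internal.syslog writes an rsyslog drop-in by piping content into `sudo dd of=/etc/rsyslog.d/80-cephtest.conf` (the drop-in's contents are not echoed in the log) and then restarts rsyslog. Below is a local sketch of that write-a-root-owned-file pattern; `write_root_file` is an illustrative helper, and the real task streams the content over SSH rather than running dd locally.

```python
# Write a root-owned file by piping its content to `sudo dd of=...`, the same
# pattern used above for the rsyslog drop-in (and later for container.json).
import subprocess

def write_root_file(path: str, content: str) -> None:
    subprocess.run(
        ["sudo", "dd", f"of={path}"],
        input=content.encode(),
        check=True,
        capture_output=True,   # dd reports byte counts on stderr; keep it quiet
    )

# Example (contents of the actual drop-in are not shown in the log):
# write_root_file("/etc/rsyslog.d/80-cephtest.conf", "<rsyslog rules here>")
```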
2026-03-09T00:18:20.809 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-09T00:18:20.819 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-09T00:18:20.849 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-09T00:18:21.706 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-09T00:18:21.767 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-09T00:18:21.767 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryk6olt3_g --limit vm03.local,vm06.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-09T00:21:28.449 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm03.local'), Remote(name='ubuntu@vm06.local')] 2026-03-09T00:21:28.449 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local' 2026-03-09T00:21:28.449 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T00:21:28.509 DEBUG:teuthology.orchestra.run.vm03:> true 2026-03-09T00:21:28.579 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local' 2026-03-09T00:21:28.579 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local' 2026-03-09T00:21:28.580 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T00:21:28.640 DEBUG:teuthology.orchestra.run.vm06:> true 2026-03-09T00:21:28.721 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local' 2026-03-09T00:21:28.721 INFO:teuthology.run_tasks:Running task clock... 2026-03-09T00:21:28.724 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
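The ansible.cephlab task assembles the ansible-playbook command shown above from the branch/skip_tags/vars overrides, a generated inventory file, and a host limit. Here is a reconstruction of that argv for illustration; `cephlab_cmd` is an invented helper, and the paths and tag list are taken verbatim from the log.

```python
# Rebuild (for illustration) the ansible-playbook invocation logged above.
import json
import shlex

def cephlab_cmd(inventory: str, hosts: list[str], skip_tags: str) -> str:
    extra_vars = {"ansible_ssh_user": "ubuntu", "timezone": "UTC"}
    argv = [
        "ansible-playbook", "-v",
        "--extra-vars", json.dumps(extra_vars),
        "-i", inventory,
        "--limit", ",".join(hosts),
        "/home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml",
        "--skip-tags", skip_tags,
    ]
    return shlex.join(argv)

print(cephlab_cmd(
    "/tmp/teuth_ansible_inventoryk6olt3_g",
    ["vm03.local", "vm06.local"],
    "nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs",
))
```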
2026-03-09T00:21:28.724 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-09T00:21:28.724 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T00:21:28.726 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-09T00:21:28.726 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T00:21:28.749 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-09T00:21:28.763 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-09T00:21:28.786 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found 2026-03-09T00:21:28.794 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-09T00:21:28.797 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon 2026-03-09T00:21:28.807 INFO:teuthology.orchestra.run.vm06.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-09T00:21:28.809 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-09T00:21:28.822 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-09T00:21:28.830 INFO:teuthology.orchestra.run.vm06.stderr:sudo: ntpd: command not found 2026-03-09T00:21:28.839 INFO:teuthology.orchestra.run.vm06.stdout:506 Cannot talk to daemon 2026-03-09T00:21:28.857 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-09T00:21:28.868 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found 2026-03-09T00:21:28.872 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T00:21:28.872 INFO:teuthology.orchestra.run.vm03.stdout:=============================================================================== 2026-03-09T00:21:28.872 INFO:teuthology.orchestra.run.vm03.stdout:^? bond1-1201.fsn-lf-s02.pr> 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.872 INFO:teuthology.orchestra.run.vm03.stdout:^? 185.252.140.125 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.872 INFO:teuthology.orchestra.run.vm03.stdout:^? vps-ber1.orleans.ddnss.de 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.872 INFO:teuthology.orchestra.run.vm03.stdout:^? mail.morbitzer.de 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.875 INFO:teuthology.orchestra.run.vm06.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 
2026-03-09T00:21:28.926 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found 2026-03-09T00:21:28.929 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T00:21:28.929 INFO:teuthology.orchestra.run.vm06.stdout:=============================================================================== 2026-03-09T00:21:28.929 INFO:teuthology.orchestra.run.vm06.stdout:^? mail.morbitzer.de 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.929 INFO:teuthology.orchestra.run.vm06.stdout:^? bond1-1201.fsn-lf-s02.pr> 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.929 INFO:teuthology.orchestra.run.vm06.stdout:^? 185.252.140.125 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.929 INFO:teuthology.orchestra.run.vm06.stdout:^? vps-ber1.orleans.ddnss.de 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T00:21:28.929 INFO:teuthology.run_tasks:Running task pexec... 2026-03-09T00:21:28.932 INFO:teuthology.task.pexec:Executing custom commands... 2026-03-09T00:21:28.932 DEBUG:teuthology.orchestra.run.vm03:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-09T00:21:28.932 DEBUG:teuthology.orchestra.run.vm06:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-09T00:21:28.934 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf remove nvme-cli -y 2026-03-09T00:21:28.934 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf install runc nvmetcli nvme-cli -y 2026-03-09T00:21:28.934 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.934 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.934 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm03.local 2026-03-09T00:21:28.934 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-09T00:21:28.934 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y 2026-03-09T00:21:28.934 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.934 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.972 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf remove nvme-cli -y 2026-03-09T00:21:28.972 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo dnf install runc nvmetcli nvme-cli -y 2026-03-09T00:21:28.973 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.973 DEBUG:teuthology.task.pexec:ubuntu@vm06.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.973 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm06.local 2026-03-09T00:21:28.973 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-09T00:21:28.973 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y 2026-03-09T00:21:28.973 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:28.973 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf 2026-03-09T00:21:29.115 INFO:teuthology.orchestra.run.vm03.stdout:No match for argument: nvme-cli 2026-03-09T00:21:29.116 INFO:teuthology.orchestra.run.vm03.stderr:No packages 
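The pexec commands switch podman's default OCI runtime from crun to runc in /usr/share/containers/containers.conf (this job's 0-distro/centos_9.stream_runc fragment). The two sed expressions are sketched in Python below; `switch_runtime_to_runc` is illustrative only and the run applies sed on the remotes instead.

```python
# What the two sed commands above accomplish: enable a commented-out
# `#runtime = "crun"` line as `runtime = "runc"`, then comment out any
# remaining active `runtime = "crun"` line.
import re

def switch_runtime_to_runc(conf_text: str) -> str:
    conf_text = re.sub(r'^#runtime = "crun"', 'runtime = "runc"',
                       conf_text, flags=re.MULTILINE)
    conf_text = re.sub(r'runtime = "crun"', '#runtime = "crun"', conf_text)
    return conf_text

print(switch_runtime_to_runc('#runtime = "crun"\n'))   # -> runtime = "runc"
```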
marked for removal. 2026-03-09T00:21:29.118 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved. 2026-03-09T00:21:29.119 INFO:teuthology.orchestra.run.vm03.stdout:Nothing to do. 2026-03-09T00:21:29.119 INFO:teuthology.orchestra.run.vm03.stdout:Complete! 2026-03-09T00:21:29.190 INFO:teuthology.orchestra.run.vm06.stdout:No match for argument: nvme-cli 2026-03-09T00:21:29.190 INFO:teuthology.orchestra.run.vm06.stderr:No packages marked for removal. 2026-03-09T00:21:29.193 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved. 2026-03-09T00:21:29.193 INFO:teuthology.orchestra.run.vm06.stdout:Nothing to do. 2026-03-09T00:21:29.193 INFO:teuthology.orchestra.run.vm06.stdout:Complete! 2026-03-09T00:21:29.461 INFO:teuthology.orchestra.run.vm03.stdout:Last metadata expiration check: 0:02:13 ago on Mon 09 Mar 2026 12:19:16 AM UTC. 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved. 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================ 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: Package Arch Version Repository Size 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================ 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout:Installing: 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: runc x86_64 4:1.4.0-2.el9 appstream 4.0 M 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout:Installing dependencies: 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-09T00:21:29.551 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout:Transaction Summary 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================ 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout:Install 7 Packages 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout:Total download size: 6.3 M 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout:Installed size: 24 M 2026-03-09T00:21:29.552 INFO:teuthology.orchestra.run.vm03.stdout:Downloading Packages: 2026-03-09T00:21:29.640 INFO:teuthology.orchestra.run.vm06.stdout:Last metadata expiration check: 0:02:12 ago on Mon 09 Mar 2026 12:19:17 AM UTC. 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:Dependencies resolved. 
2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================ 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: Package Arch Version Repository Size 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================ 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:Installing: 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: runc x86_64 4:1.4.0-2.el9 appstream 4.0 M 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:Installing dependencies: 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:Transaction Summary 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:================================================================================ 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout:Install 7 Packages 2026-03-09T00:21:29.758 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:29.759 INFO:teuthology.orchestra.run.vm06.stdout:Total download size: 6.3 M 2026-03-09T00:21:29.759 INFO:teuthology.orchestra.run.vm06.stdout:Installed size: 24 M 2026-03-09T00:21:29.759 INFO:teuthology.orchestra.run.vm06.stdout:Downloading Packages: 2026-03-09T00:21:29.898 INFO:teuthology.orchestra.run.vm03.stdout:(1/7): python3-configshell-1.1.30-1.el9.noarch. 629 kB/s | 72 kB 00:00 2026-03-09T00:21:29.899 INFO:teuthology.orchestra.run.vm03.stdout:(2/7): nvmetcli-0.8-3.el9.noarch.rpm 381 kB/s | 44 kB 00:00 2026-03-09T00:21:29.956 INFO:teuthology.orchestra.run.vm03.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 1.4 MB/s | 84 kB 00:00 2026-03-09T00:21:29.956 INFO:teuthology.orchestra.run.vm03.stdout:(4/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.5 MB/s | 150 kB 00:00 2026-03-09T00:21:30.013 INFO:teuthology.orchestra.run.vm03.stdout:(5/7): nvme-cli-2.16-1.el9.x86_64.rpm 5.0 MB/s | 1.2 MB 00:00 2026-03-09T00:21:30.046 INFO:teuthology.orchestra.run.vm03.stdout:(6/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 9.1 MB/s | 837 kB 00:00 2026-03-09T00:21:31.647 INFO:teuthology.orchestra.run.vm03.stdout:(7/7): runc-1.4.0-2.el9.x86_64.rpm 2.3 MB/s | 4.0 MB 00:01 2026-03-09T00:21:31.647 INFO:teuthology.orchestra.run.vm03.stdout:-------------------------------------------------------------------------------- 2026-03-09T00:21:31.647 INFO:teuthology.orchestra.run.vm03.stdout:Total 3.0 MB/s | 6.3 MB 00:02 2026-03-09T00:21:31.726 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction check 2026-03-09T00:21:31.733 INFO:teuthology.orchestra.run.vm03.stdout:Transaction check succeeded. 
2026-03-09T00:21:31.733 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction test 2026-03-09T00:21:31.796 INFO:teuthology.orchestra.run.vm03.stdout:Transaction test succeeded. 2026-03-09T00:21:31.797 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction 2026-03-09T00:21:31.954 INFO:teuthology.orchestra.run.vm03.stdout: Preparing : 1/1 2026-03-09T00:21:31.963 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7 2026-03-09T00:21:31.974 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7 2026-03-09T00:21:31.980 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7 2026-03-09T00:21:31.986 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7 2026-03-09T00:21:31.988 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7 2026-03-09T00:21:32.037 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7 2026-03-09T00:21:32.115 INFO:teuthology.orchestra.run.vm06.stdout:(1/7): python3-configshell-1.1.30-1.el9.noarch. 359 kB/s | 72 kB 00:00 2026-03-09T00:21:32.139 INFO:teuthology.orchestra.run.vm06.stdout:(2/7): nvmetcli-0.8-3.el9.noarch.rpm 195 kB/s | 44 kB 00:00 2026-03-09T00:21:32.163 INFO:teuthology.orchestra.run.vm03.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7 2026-03-09T00:21:32.168 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7 2026-03-09T00:21:32.231 INFO:teuthology.orchestra.run.vm06.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 723 kB/s | 84 kB 00:00 2026-03-09T00:21:32.247 INFO:teuthology.orchestra.run.vm06.stdout:(4/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.4 MB/s | 150 kB 00:00 2026-03-09T00:21:32.305 INFO:teuthology.orchestra.run.vm06.stdout:(5/7): nvme-cli-2.16-1.el9.x86_64.rpm 3.0 MB/s | 1.2 MB 00:00 2026-03-09T00:21:32.389 INFO:teuthology.orchestra.run.vm06.stdout:(6/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 5.2 MB/s | 837 kB 00:00 2026-03-09T00:21:32.448 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7 2026-03-09T00:21:32.448 INFO:teuthology.orchestra.run.vm03.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 2026-03-09T00:21:32.448 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:21:32.571 INFO:teuthology.orchestra.run.vm06.stdout:(7/7): runc-1.4.0-2.el9.x86_64.rpm 12 MB/s | 4.0 MB 00:00 2026-03-09T00:21:32.571 INFO:teuthology.orchestra.run.vm06.stdout:-------------------------------------------------------------------------------- 2026-03-09T00:21:32.571 INFO:teuthology.orchestra.run.vm06.stdout:Total 2.2 MB/s | 6.3 MB 00:02 2026-03-09T00:21:32.656 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction check 2026-03-09T00:21:32.664 INFO:teuthology.orchestra.run.vm06.stdout:Transaction check succeeded. 2026-03-09T00:21:32.664 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction test 2026-03-09T00:21:32.741 INFO:teuthology.orchestra.run.vm06.stdout:Transaction test succeeded. 
2026-03-09T00:21:32.742 INFO:teuthology.orchestra.run.vm06.stdout:Running transaction 2026-03-09T00:21:32.892 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7 2026-03-09T00:21:32.892 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7 2026-03-09T00:21:32.892 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7 2026-03-09T00:21:32.892 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7 2026-03-09T00:21:32.892 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7 2026-03-09T00:21:32.892 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm06.stdout: Preparing : 1/1 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout:Installed: 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: runc-4:1.4.0-2.el9.x86_64 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:21:32.954 INFO:teuthology.orchestra.run.vm03.stdout:Complete! 2026-03-09T00:21:32.965 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7 2026-03-09T00:21:32.980 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7 2026-03-09T00:21:32.989 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7 2026-03-09T00:21:32.998 INFO:teuthology.orchestra.run.vm06.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7 2026-03-09T00:21:33.001 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7 2026-03-09T00:21:33.017 DEBUG:teuthology.parallel:result is None 2026-03-09T00:21:33.057 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7 2026-03-09T00:21:33.221 INFO:teuthology.orchestra.run.vm06.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7 2026-03-09T00:21:33.224 INFO:teuthology.orchestra.run.vm06.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7 2026-03-09T00:21:33.614 INFO:teuthology.orchestra.run.vm06.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7 2026-03-09T00:21:33.614 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 
2026-03-09T00:21:33.614 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.122 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7 2026-03-09T00:21:34.122 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7 2026-03-09T00:21:34.122 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7 2026-03-09T00:21:34.122 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7 2026-03-09T00:21:34.122 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7 2026-03-09T00:21:34.122 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7 2026-03-09T00:21:34.203 INFO:teuthology.orchestra.run.vm06.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7 2026-03-09T00:21:34.203 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.203 INFO:teuthology.orchestra.run.vm06.stdout:Installed: 2026-03-09T00:21:34.203 INFO:teuthology.orchestra.run.vm06.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-09T00:21:34.203 INFO:teuthology.orchestra.run.vm06.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-09T00:21:34.204 INFO:teuthology.orchestra.run.vm06.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-09T00:21:34.204 INFO:teuthology.orchestra.run.vm06.stdout: runc-4:1.4.0-2.el9.x86_64 2026-03-09T00:21:34.204 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.204 INFO:teuthology.orchestra.run.vm06.stdout:Complete! 2026-03-09T00:21:34.295 DEBUG:teuthology.parallel:result is None 2026-03-09T00:21:34.295 INFO:teuthology.run_tasks:Running task cephadm.deploy_samba_ad_dc... 
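The interleaved vm03/vm06 output above comes from running the same pexec command list on both hosts concurrently. Below is a rough local stand-in using a thread pool and plain ssh; it is an assumption-laden sketch, and teuthology's own parallel helper and SSH layer are not this code.

```python
# Run the same command list on both hosts at once, which is why the dnf
# output from vm03 and vm06 interleaves in the log above.
from concurrent.futures import ThreadPoolExecutor
import subprocess

HOSTS = ["ubuntu@vm03.local", "ubuntu@vm06.local"]   # from the roles above
COMMANDS = [
    "sudo dnf remove nvme-cli -y",
    "sudo dnf install runc nvmetcli nvme-cli -y",
]

def run_on(host: str) -> int:
    for cmd in COMMANDS:
        subprocess.run(["ssh", host, cmd], check=True)
    return 0

with ThreadPoolExecutor(max_workers=len(HOSTS)) as pool:
    results = list(pool.map(run_on, HOSTS))
```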
2026-03-09T00:21:34.348 INFO:tasks.cephadm:Testing if podman is available 2026-03-09T00:21:34.348 DEBUG:teuthology.orchestra.run.vm06:> sudo podman --help 2026-03-09T00:21:34.401 INFO:teuthology.orchestra.run.vm06.stdout:Manage pods, containers and images 2026-03-09T00:21:34.401 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout:Usage: 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: podman [options] [command] 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout:Available Commands: 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: artifact Manage OCI artifacts 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: attach Attach to a running container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: auto-update Auto update containers according to their auto-update policy 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: build Build an image using instructions from Containerfiles 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: commit Create new image based on the changed container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: compose Run compose workloads via an external provider such as docker-compose or podman-compose 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: container Manage containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: cp Copy files/folders between a container and the local filesystem 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: create Create but do not start a container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: diff Display the changes to the object's file system 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: events Show podman system events 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: exec Run a process in a running container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: export Export container's filesystem contents as a tar archive 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: farm Farm out builds to remote machines 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: generate Generate structured data based on containers, pods or volumes 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: healthcheck Manage health checks on containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: help Help about any command 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: history Show history of a specified image 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: image Manage images 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: images List images in local storage 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: import Import a tarball to create a filesystem image 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: info Display podman system information 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: init Initialize one or more containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: inspect Display the configuration of object denoted by ID 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: kill Kill one or more running containers with a specific signal 
2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: kube Play containers, pods or volumes from a structured file 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: load Load image(s) from a tar archive 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: login Log in to a container registry 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: logout Log out of a container registry 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: logs Fetch the logs of one or more containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: machine Manage a virtual machine 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: manifest Manipulate manifest lists and image indexes 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: mount Mount a working container's root filesystem 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: network Manage networks 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: pause Pause all the processes in one or more containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: pod Manage pods 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: port List port mappings or a specific mapping for the container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: ps List containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: pull Pull an image from a registry 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: push Push an image to a specified destination 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: quadlet Allows users to manage Quadlets 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: rename Rename an existing container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: restart Restart one or more containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: rm Remove one or more containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: rmi Remove one or more images from local storage 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: run Run a command in a new container 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: save Save image(s) to an archive 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: search Search registry for image 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: secret Manage secrets 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: start Start one or more containers 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: stats Display a live stream of container resource usage statistics 2026-03-09T00:21:34.402 INFO:teuthology.orchestra.run.vm06.stdout: stop Stop one or more containers 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: system Manage podman 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: tag Add an additional name to a local image 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: top Display the running processes of a container 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: unmount Unmount working container's root filesystem 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: unpause Unpause the processes in one or more containers 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: unshare Run a command in a modified user 
namespace 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: untag Remove a name from a local image 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: update Update an existing container 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: version Display the Podman version information 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: volume Manage volumes 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: wait Block on one or more containers 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout:Options: 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --cdi-spec-dir stringArray Set the CDI spec directory path (may be set multiple times) (default [/etc/cdi,/var/run/cdi]) 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --cgroup-manager string Cgroup manager to use ("cgroupfs"|"systemd") (default "systemd") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --config string Path to directory containing authentication config file 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --conmon string Path of the conmon binary 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: -c, --connection string Connection to use for remote Podman service (CONTAINER_CONNECTION) 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --events-backend string Events backend to use ("file"|"journald"|"none") (default "journald") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --help Help for podman 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --hooks-dir stringArray Set the OCI hooks directory path (may be set multiple times) (default [/usr/share/containers/oci/hooks.d]) 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --identity string path to SSH identity file, (CONTAINER_SSHKEY) 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --imagestore string Path to the 'image store', different from 'graph root', use this to split storing the image into a separate 'image store', see 'man containers-storage.conf' for details 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --log-level string Log messages above specified level (trace, debug, info, warn, warning, error, fatal, panic) (default "warn") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --module stringArray Load the containers.conf(5) module 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --network-cmd-path string Path to the command for configuring the network 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --network-config-dir string Path of the configuration directory for networks 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --out string Send output (stdout) from podman to a file 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: -r, --remote Access remote Podman service 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --root string Path to the graph root directory where images, containers, etc. are stored 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --runroot string Path to the 'run directory' where all state information is stored 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --runtime string Path to the OCI-compatible binary used to run containers. 
(default "runc") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --runtime-flag stringArray add global flags for the container runtime 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --ssh string define the ssh mode (default "golang") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --storage-driver string Select which storage driver is used to manage storage of images and containers 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --storage-opt stringArray Used to pass an option to the storage driver 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --syslog Output podman-internal logs to syslog as well as the console (default false) 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --tls-ca string path to TLS certificate Authority PEM file for remote. 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --tls-cert string path to TLS client certificate PEM file for remote. 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --tls-key string path to TLS client certificate private key PEM file for remote. 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --tmpdir string Path to the tmp directory for libpod state content. 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: Note: use the environment variable 'TMPDIR' to change the temporary storage location for container images, '/var/tmp'. 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: (default "/run/libpod") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --transient-store Enable transient container storage 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --url string URL to access Podman service (CONTAINER_HOST) (default "unix:///run/podman/podman.sock") 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: -v, --version version for podman 2026-03-09T00:21:34.403 INFO:teuthology.orchestra.run.vm06.stdout: --volumepath string Path to the volume directory in which volume data is stored 2026-03-09T00:21:34.411 DEBUG:teuthology.orchestra.run.vm06:> sudo podman pull quay.io/samba.org/samba-ad-server:latest 2026-03-09T00:21:34.515 INFO:teuthology.orchestra.run.vm06.stderr:Trying to pull quay.io/samba.org/samba-ad-server:latest... 
2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Getting image source signatures 2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:c0920deb4092ae59f2370126b40d0ac9196853983118586d2d2f6e347ef1d845 2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:cd7ba9a7bc37ac1a55979cd5f3d20903c275d251cf38ceb51cacff1f5f96ae72 2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:708792c66f64961ac2acf26b515f9a1ead2f27a8d2478a21e82da9bb485205d3 2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:001ffc0a1d8385a856651332438933f37dcd1413b095ec35d55b737a52e0a704 2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:32f46c1320dbd4aa5db0c3c7eb552b73f601eee32b3971afa9315a7a5bc99d4b 2026-03-09T00:21:36.001 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:62a1f31ed0bb768484d676b5f4199490a0ef7149e0d33ef5020f1da5334c40df 2026-03-09T00:21:55.939 INFO:teuthology.orchestra.run.vm06.stderr:Copying config sha256:4713b105ffd2255e32e8b9dec6c5e94988a5706fac59a2989f19b1b472c3c536 2026-03-09T00:21:55.942 INFO:teuthology.orchestra.run.vm06.stderr:Writing manifest to image destination 2026-03-09T00:21:55.961 INFO:teuthology.orchestra.run.vm06.stdout:4713b105ffd2255e32e8b9dec6c5e94988a5706fac59a2989f19b1b472c3c536 2026-03-09T00:21:55.968 DEBUG:teuthology.orchestra.run.vm06:> sudo podman pull quay.io/samba.org/samba-client:latest 2026-03-09T00:21:56.020 INFO:teuthology.orchestra.run.vm06.stderr:Trying to pull quay.io/samba.org/samba-client:latest... 2026-03-09T00:21:57.443 INFO:teuthology.orchestra.run.vm06.stderr:Getting image source signatures 2026-03-09T00:21:57.443 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:ebf30006f6881eb69abb9d2503d1076468657e1c29d69b17215175c43ffad511 2026-03-09T00:21:57.443 INFO:teuthology.orchestra.run.vm06.stderr:Copying blob sha256:62a1f31ed0bb768484d676b5f4199490a0ef7149e0d33ef5020f1da5334c40df 2026-03-09T00:22:05.893 INFO:teuthology.orchestra.run.vm06.stderr:Copying config sha256:c060b5405e2be405765eab4bc4b30f141a78581b7e1c59fecadfe4c356f5fc66 2026-03-09T00:22:05.895 INFO:teuthology.orchestra.run.vm06.stderr:Writing manifest to image destination 2026-03-09T00:22:05.905 INFO:teuthology.orchestra.run.vm06.stdout:c060b5405e2be405765eab4bc4b30f141a78581b7e1c59fecadfe4c356f5fc66 2026-03-09T00:22:05.911 DEBUG:teuthology.orchestra.run.vm06:> ss -lunH 2026-03-09T00:22:05.930 INFO:teuthology.orchestra.run.vm06.stdout:UNCONN 0 0 0.0.0.0:111 0.0.0.0:* 2026-03-09T00:22:05.930 INFO:teuthology.orchestra.run.vm06.stdout:UNCONN 0 0 127.0.0.1:323 0.0.0.0:* 2026-03-09T00:22:05.930 INFO:teuthology.orchestra.run.vm06.stdout:UNCONN 0 0 [::]:111 [::]:* 2026-03-09T00:22:05.931 INFO:teuthology.orchestra.run.vm06.stdout:UNCONN 0 0 [::1]:323 [::]:* 2026-03-09T00:22:05.931 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /var/lib/samba/container/logs /var/lib/samba/container/data 2026-03-09T00:22:05.998 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /var/tmp/samba 2026-03-09T00:22:06.066 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-03-09T00:22:06.066 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/var/tmp/samba/container.json 2026-03-09T00:22:06.138 DEBUG:teuthology.orchestra.run.vm06:> sudo podman run -d --name=samba-ad --network=host --privileged --volume=/var/tmp/samba:/etc/samba-container:ro -eSAMBACC_CONFIG=/etc/samba-container/container.json quay.io/samba.org/samba-ad-server:latest 
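The AD DC's container.json is created by piping its contents into "sudo dd of=/var/tmp/samba/container.json"; writing root-owned files through sudo dd is a common teuthology idiom. A minimal local sketch of that pattern, assuming a plain subprocess call instead of the task's SSH-backed remote runner; the helper name is illustrative and the JSON payload itself is not shown in the log:

    import subprocess

    # Hedged sketch: write a root-owned file by piping content into `sudo dd of=PATH`,
    # mirroring the "sudo dd of=/var/tmp/samba/container.json" step in the log above.
    # The helper name is illustrative; the real task streams the data over SSH.
    def sudo_write_file(path, data):
        subprocess.run(["sudo", "dd", f"of={path}"], input=data.encode(), check=True)

    # Usage (payload elided here because the log does not show it):
    # sudo_write_file("/var/tmp/samba/container.json", dc_config_json)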
2026-03-09T00:22:06.313 INFO:teuthology.orchestra.run.vm06.stdout:d3bc89d62e862408165f00746d7449888eb6185e1de1175e9cefc3e72d04cad6 2026-03-09T00:22:06.567 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.106, idx=0 2026-03-09T00:22:06.567 DEBUG:teuthology.orchestra.run.vm06:> sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T00:22:06.823 INFO:teuthology.orchestra.run.vm06.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T00:22:06.952 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:22:07.452 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.106, idx=1 2026-03-09T00:22:07.453 DEBUG:teuthology.orchestra.run.vm06:> sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T00:22:07.794 INFO:teuthology.orchestra.run.vm06.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T00:22:07.924 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:22:08.925 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.106, idx=2 2026-03-09T00:22:08.926 DEBUG:teuthology.orchestra.run.vm06:> sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T00:22:09.174 INFO:teuthology.orchestra.run.vm06.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T00:22:09.297 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:22:11.298 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.106, idx=3 2026-03-09T00:22:11.299 DEBUG:teuthology.orchestra.run.vm06:> sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T00:22:11.722 INFO:teuthology.orchestra.run.vm06.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T00:22:11.851 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:22:15.852 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.106, idx=4 2026-03-09T00:22:15.852 DEBUG:teuthology.orchestra.run.vm06:> sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T00:22:16.069 INFO:teuthology.orchestra.run.vm06.stdout: . D 0 Mon Mar 9 00:22:08 2026 2026-03-09T00:22:16.069 INFO:teuthology.orchestra.run.vm06.stdout: .. D 0 Mon Mar 9 00:22:08 2026 2026-03-09T00:22:16.069 INFO:teuthology.orchestra.run.vm06.stdout: domain1.sink.test D 0 Mon Mar 9 00:22:07 2026 2026-03-09T00:22:16.069 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:22:16.069 INFO:teuthology.orchestra.run.vm06.stdout: 41876460 blocks of size 1024. 38795992 blocks available 2026-03-09T00:22:16.197 INFO:tasks.cephadm:SMB status probe succeeded 2026-03-09T00:22:16.197 INFO:teuthology.run_tasks:Running task cephadm... 
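The probe loop above keeps re-running the same smbclient command in a throwaway samba-client container, sleeping roughly 0.5 s, 1 s, 2 s and then 4 s between attempts until the DC finally serves the sysvol share (idx=4 here). A minimal sketch of that retry-with-backoff pattern, assuming plain subprocess calls rather than teuthology's remote execution layer; the command line is copied from the log, while the helper name and the backoff cap are illustrative:

    import subprocess
    import time

    def wait_for_dc(dc_ip, attempts=10, first_delay=0.5):
        # the exact probe command seen in the log: list sysvol on the new DC
        cmd = [
            "sudo", "podman", "run", "--rm", "--net=host", f"--dns={dc_ip}",
            "-eKRB5_CONFIG=/dev/null", "quay.io/samba.org/samba-client:latest",
            "smbclient", "-U", r"DOMAIN1\ckent%1115Rose.",
            "//domain1.sink.test/sysvol", "-c", "ls",
        ]
        delay = first_delay
        for idx in range(attempts):
            print(f"Probing SMB status of DC {dc_ip}, idx={idx}")
            if subprocess.run(cmd).returncode == 0:
                return True               # DC answered; sysvol listed
            time.sleep(delay)
            delay = min(delay * 2, 30)    # double the wait, capped (cap is an assumption)
        return False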
2026-03-09T00:22:16.199 INFO:tasks.cephadm:Config: {'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-09T00:22:16.199 INFO:tasks.cephadm:Cluster image is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:22:16.199 INFO:tasks.cephadm:Cluster fsid is 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:22:16.199 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-09T00:22:16.200 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.103'} 2026-03-09T00:22:16.200 INFO:tasks.cephadm:First mon is mon.a on vm03 2026-03-09T00:22:16.200 INFO:tasks.cephadm:First mgr is x 2026-03-09T00:22:16.200 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-09T00:22:16.200 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s) 2026-03-09T00:22:16.225 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s) 2026-03-09T00:22:16.253 INFO:tasks.cephadm:Downloading "compiled" cephadm from cachra 2026-03-09T00:22:16.253 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:22:16.917 INFO:tasks.cephadm:builder_project result: [{'url': 'https://3.chacra.ceph.com/r/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'chacra_url': 'https://3.chacra.ceph.com/repos/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'ref': 'squid', 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df', 'distro': 'centos', 'distro_version': '9', 'distro_codename': None, 'modified': '2026-02-25 18:55:15.146628', 'status': 'ready', 'flavor': 'default', 'project': 'ceph', 'archs': ['source', 'x86_64'], 'extra': {'version': '19.2.3-678-ge911bdeb', 'package_manager_version': '19.2.3-678.ge911bdeb', 'build_url': 'https://jenkins.ceph.com/job/ceph-dev-pipeline/3275/', 'root_build_cause': '', 'node_name': '10.20.192.26+soko16', 'job_name': 'ceph-dev-pipeline'}}] 2026-03-09T00:22:17.551 INFO:tasks.util.chacra:got chacra host 3.chacra.ceph.com, ref squid, sha1 e911bdebe5c8faa3800735d1568fcdca65db60df from https://shaman.ceph.com/api/search/?project=ceph&distros=centos%2F9%2Fx86_64&flavor=default&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:22:17.553 INFO:tasks.cephadm:Discovered cachra url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-09T00:22:17.553 INFO:tasks.cephadm:Downloading cephadm from url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-09T00:22:17.553 DEBUG:teuthology.orchestra.run.vm03:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T00:22:19.218 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 
1 ubuntu ubuntu 788355 Mar 9 00:22 /home/ubuntu/cephtest/cephadm 2026-03-09T00:22:19.218 DEBUG:teuthology.orchestra.run.vm06:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T00:22:20.805 INFO:teuthology.orchestra.run.vm06.stdout:-rw-r--r--. 1 ubuntu ubuntu 788355 Mar 9 00:22 /home/ubuntu/cephtest/cephadm 2026-03-09T00:22:20.805 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T00:22:20.820 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T00:22:20.844 INFO:tasks.cephadm:Pulling image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on all hosts... 2026-03-09T00:22:20.844 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-09T00:22:20.862 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-09T00:22:21.020 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T00:22:21.027 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout:{ 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout: "repo_digests": [ 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout: ] 2026-03-09T00:23:01.409 INFO:teuthology.orchestra.run.vm06.stdout:} 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout:{ 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [ 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout: ] 2026-03-09T00:23:13.982 INFO:teuthology.orchestra.run.vm03.stdout:} 2026-03-09T00:23:14.003 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph 2026-03-09T00:23:14.032 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph 2026-03-09T00:23:14.062 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph 2026-03-09T00:23:14.096 
DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph
2026-03-09T00:23:14.131 INFO:tasks.cephadm:Writing seed config...
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-09T00:23:14.132 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-09T00:23:14.133 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-09T00:23:14.133 DEBUG:teuthology.orchestra.run.vm03:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-09T00:23:14.152 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86
[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000
[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20
[client.rgw]
rgw cache enabled = true
rgw enable ops log =
true rgw enable usage log = true 2026-03-09T00:23:14.152 DEBUG:teuthology.orchestra.run.vm03:mon.a> sudo journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a.service 2026-03-09T00:23:14.194 DEBUG:teuthology.orchestra.run.vm03:mgr.x> sudo journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x.service 2026-03-09T00:23:14.236 INFO:tasks.cephadm:Bootstrapping... 2026-03-09T00:23:14.236 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df -v bootstrap --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id x --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.103 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:14.377 INFO:teuthology.orchestra.run.vm03.stdout:-------------------------------------------------------------------------------- 2026-03-09T00:23:14.377 INFO:teuthology.orchestra.run.vm03.stdout:cephadm ['--image', 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df', '-v', 'bootstrap', '--fsid', '06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'x', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.103', '--skip-admin-label'] 2026-03-09T00:23:14.377 INFO:teuthology.orchestra.run.vm03.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-03-09T00:23:14.377 INFO:teuthology.orchestra.run.vm03.stdout:Verifying podman|docker is present... 2026-03-09T00:23:14.394 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0 2026-03-09T00:23:14.395 INFO:teuthology.orchestra.run.vm03.stdout:Verifying lvm2 is present... 2026-03-09T00:23:14.395 INFO:teuthology.orchestra.run.vm03.stdout:Verifying time synchronization is in place... 2026-03-09T00:23:14.401 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-09T00:23:14.401 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T00:23:14.409 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-09T00:23:14.409 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive 2026-03-09T00:23:14.416 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled 2026-03-09T00:23:14.422 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active 2026-03-09T00:23:14.422 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running 2026-03-09T00:23:14.422 INFO:teuthology.orchestra.run.vm03.stdout:Repeating the final host check... 
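During the host check, cephadm looks for a running time-sync daemon: chrony.service is absent on this host (is-enabled exits 1, is-active exits 3), but chronyd.service reports enabled and active, so the check passes. A rough sketch of that probe-a-list-of-units pattern, assuming systemctl is-enabled/is-active are the only checks; the candidate list and helper name are illustrative, not cephadm's actual code:

    import subprocess

    CANDIDATE_UNITS = ["chrony.service", "chronyd.service",
                       "systemd-timesyncd.service", "ntpd.service"]

    def unit_state(verb, unit):
        # `systemctl is-enabled|is-active UNIT` exits non-zero for missing/inactive units
        p = subprocess.run(["systemctl", verb, unit], capture_output=True, text=True)
        return p.returncode, p.stdout.strip()

    def find_time_sync_unit():
        for unit in CANDIDATE_UNITS:
            rc_en, enabled = unit_state("is-enabled", unit)
            rc_act, active = unit_state("is-active", unit)
            if rc_en == 0 and enabled == "enabled" and rc_act == 0 and active == "active":
                return unit   # e.g. "chronyd.service" on this host
        return None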
2026-03-09T00:23:14.442 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0 2026-03-09T00:23:14.442 INFO:teuthology.orchestra.run.vm03.stdout:podman (/bin/podman) version 5.8.0 is present 2026-03-09T00:23:14.442 INFO:teuthology.orchestra.run.vm03.stdout:systemctl is present 2026-03-09T00:23:14.442 INFO:teuthology.orchestra.run.vm03.stdout:lvcreate is present 2026-03-09T00:23:14.449 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-09T00:23:14.449 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T00:23:14.456 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-09T00:23:14.456 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive 2026-03-09T00:23:14.463 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled 2026-03-09T00:23:14.468 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active 2026-03-09T00:23:14.469 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running 2026-03-09T00:23:14.469 INFO:teuthology.orchestra.run.vm03.stdout:Host looks OK 2026-03-09T00:23:14.469 INFO:teuthology.orchestra.run.vm03.stdout:Cluster fsid: 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:14.469 INFO:teuthology.orchestra.run.vm03.stdout:Acquiring lock 139933513689024 on /run/cephadm/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.lock 2026-03-09T00:23:14.469 INFO:teuthology.orchestra.run.vm03.stdout:Lock 139933513689024 acquired on /run/cephadm/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.lock 2026-03-09T00:23:14.469 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 3300 ... 2026-03-09T00:23:14.470 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 6789 ... 
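Bootstrap also verifies that the monitor's msgr2 and legacy ports (3300 and 6789) on the chosen IP are still free. A simple sketch of such a check using a bind attempt; cephadm's own implementation may differ, and the function name is illustrative:

    import socket

    def port_is_free(ip, port):
        # try to bind the address; failure means something already listens there
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind((ip, port))
                return True
            except OSError:
                return False

    # for port in (3300, 6789):
    #     assert port_is_free("192.168.123.103", port)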
2026-03-09T00:23:14.470 INFO:teuthology.orchestra.run.vm03.stdout:Base mon IP(s) is [192.168.123.103:3300, 192.168.123.103:6789], mon addrv is [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-09T00:23:14.473 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100 2026-03-09T00:23:14.474 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100 2026-03-09T00:23:14.477 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-03-09T00:23:14.477 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:3/64 scope link noprefixroute 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24` 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24` 2026-03-09T00:23:14.480 INFO:teuthology.orchestra.run.vm03.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-03-09T00:23:14.481 INFO:teuthology.orchestra.run.vm03.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-09T00:23:14.481 INFO:teuthology.orchestra.run.vm03.stdout:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Trying to pull quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 
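The public network is inferred by matching the mon IP against the connected routes reported by /sbin/ip, which is why 192.168.123.0/24 is picked up (and ends up listed twice). A small self-contained sketch of that inference, using the route lines from the log as sample input; the parsing here is illustrative rather than cephadm's exact logic:

    import ipaddress

    ROUTES = """\
    default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100
    192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100
    """

    def infer_public_network(mon_ip, routes=ROUTES):
        ip = ipaddress.ip_address(mon_ip)
        for line in routes.splitlines():
            first = line.split()[0]
            if "/" not in first:          # skip the default route
                continue
            net = ipaddress.ip_network(first, strict=False)
            if ip in net:
                return str(net)
        return None

    print(infer_public_network("192.168.123.103"))   # -> 192.168.123.0/24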
2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Getting image source signatures 2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:1752b8d01aa0dd33bbe0ab24e8316174c94fbdcd5d26252e2680bba0624747a7 2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:8e380faede39ebd4286247457b408d979ab568aafd8389c42ec304b8cfba4e92 2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying config sha256:654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-09T00:23:15.793 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-09T00:23:16.100 INFO:teuthology.orchestra.run.vm03.stdout:ceph: stdout ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-09T00:23:16.100 INFO:teuthology.orchestra.run.vm03.stdout:Ceph version: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-09T00:23:16.100 INFO:teuthology.orchestra.run.vm03.stdout:Extracting ceph user uid/gid from container image... 2026-03-09T00:23:16.318 INFO:teuthology.orchestra.run.vm03.stdout:stat: stdout 167 167 2026-03-09T00:23:16.318 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial keys... 2026-03-09T00:23:16.566 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQD0Eq5p5monGRAAccJfvsKgB4afPP4aeHRBLw== 2026-03-09T00:23:16.779 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQD0Eq5pbmQqJxAAAb0b5FfyrCgaY6ActhSbhQ== 2026-03-09T00:23:17.017 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQD0Eq5pzAc6NBAAR5trMvPaWzCDktgwjBlVnA== 2026-03-09T00:23:17.018 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial monmap... 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:monmaptool for a [v2:192.168.123.103:3300,v1:192.168.123.103:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:setting min_mon_release = quincy 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: set fsid to 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:17.243 INFO:teuthology.orchestra.run.vm03.stdout:Creating mon... 2026-03-09T00:23:17.484 INFO:teuthology.orchestra.run.vm03.stdout:create mon.a on 2026-03-09T00:23:17.761 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
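Key and monmap creation happen next: ceph-authtool --gen-print-key produces the initial cephx secrets (the AQD... strings above), and monmaptool writes an epoch-0 map containing mon.a's v2/v1 address pair. A hedged sketch of those two calls; cephadm actually runs them inside the ceph container image, so the bare subprocess invocations and wrapper names here are illustrative:

    import subprocess

    def gen_key():
        # prints a fresh base64 cephx secret, e.g. "AQD0Eq5p..."
        p = subprocess.run(["ceph-authtool", "--gen-print-key"],
                           capture_output=True, text=True, check=True)
        return p.stdout.strip()

    def make_monmap(fsid, mon_id, addrvec, path="/tmp/monmap"):
        # epoch-0 monmap with a single monitor at the given v2/v1 address pair
        subprocess.run(["monmaptool", "--create", "--clobber", "--fsid", fsid,
                        "--addv", mon_id, addrvec, path], check=True)

    # make_monmap("06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86", "a",
    #             "[v2:192.168.123.103:3300,v1:192.168.123.103:6789]")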
2026-03-09T00:23:17.885 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.target → /etc/systemd/system/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.target. 2026-03-09T00:23:17.885 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.target → /etc/systemd/system/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.target. 2026-03-09T00:23:18.048 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a 2026-03-09T00:23:18.048 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a.service: Unit ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a.service not loaded. 2026-03-09T00:23:18.197 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.target.wants/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a.service → /etc/systemd/system/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@.service. 2026-03-09T00:23:18.379 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T00:23:18.379 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available 2026-03-09T00:23:18.379 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon to start... 2026-03-09T00:23:18.379 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon... 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout cluster: 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout id: 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout services: 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum a (age 0.133288s) 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout data: 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pgs: 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:mon is available 2026-03-09T00:23:18.706 INFO:teuthology.orchestra.run.vm03.stdout:Assimilating anything we can from ceph.conf... 2026-03-09T00:23:19.891 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:19 vm03 ceph-mon[50484]: from='client.? 
192.168.123.103:0/1913700306' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-09T00:23:19.964 INFO:teuthology.orchestra.run.vm03.stdout:Generating new minimal ceph.conf... 2026-03-09T00:23:20.255 INFO:teuthology.orchestra.run.vm03.stdout:Restarting the monitor... 2026-03-09T00:23:20.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 systemd[1]: Stopping Ceph mon.a for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 
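At this point the seed ceph.conf is folded into the cluster: ceph config assimilate-conf pushes every option the mon recognizes into the config database (the residue is echoed back above), and a new minimal ceph.conf, essentially just fsid and mon_host, is generated for /etc/ceph before the monitor is restarted. A short sketch of those two config commands, assuming the standard ceph CLI; the wrapper itself is illustrative:

    import subprocess

    def assimilate_and_minimize(seed_conf="/home/ubuntu/cephtest/seed.ceph.conf",
                                out_conf="/etc/ceph/ceph.conf"):
        # options the mon understands move into the config DB; the rest are echoed back
        subprocess.run(["ceph", "config", "assimilate-conf", "-i", seed_conf], check=True)
        # emit the minimal client config (fsid + mon_host) and install it
        minimal = subprocess.run(["ceph", "config", "generate-minimal-conf"],
                                 capture_output=True, text=True, check=True).stdout
        with open(out_conf, "w") as f:
            f.write(minimal)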
2026-03-09T00:23:20.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a[50460]: 2026-03-09T00:23:20.339+0000 7fa8c633f640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:20.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a[50460]: 2026-03-09T00:23:20.339+0000 7fa8c633f640 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-09T00:23:20.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 podman[50755]: 2026-03-09 00:23:20.484682345 +0000 UTC m=+0.160637953 container died 03974f11645e549a148db5d0b42bde0febe45d595fd1213fa43bb099cd872887 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:23:20.814 INFO:teuthology.orchestra.run.vm03.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 podman[50755]: 2026-03-09 00:23:20.602643309 +0000 UTC m=+0.278598917 container remove 03974f11645e549a148db5d0b42bde0febe45d595fd1213fa43bb099cd872887 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 bash[50755]: ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 systemd[1]: ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a.service: Deactivated successfully. 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 systemd[1]: Stopped Ceph mon.a for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86. 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 systemd[1]: Starting Ceph mon.a for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 
2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 podman[50836]: 2026-03-09 00:23:20.762079071 +0000 UTC m=+0.016195343 container create d732babca9c1f382d37cef46bfec5340c187b09e69d1cd22e789c00d68707b6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 podman[50836]: 2026-03-09 00:23:20.801098403 +0000 UTC m=+0.055214675 container init d732babca9c1f382d37cef46bfec5340c187b09e69d1cd22e789c00d68707b6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 podman[50836]: 2026-03-09 00:23:20.805368945 +0000 UTC m=+0.059485208 container start d732babca9c1f382d37cef46bfec5340c187b09e69d1cd22e789c00d68707b6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a, org.label-schema.vendor=CentOS, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 bash[50836]: d732babca9c1f382d37cef46bfec5340c187b09e69d1cd22e789c00d68707b6f 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 podman[50836]: 2026-03-09 00:23:20.755956391 +0000 UTC m=+0.010072663 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 systemd[1]: Started 
Ceph mon.a for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86. 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 6 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: pidfile_write: ignore empty --pid-file 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: load: jerasure load: lrc 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: RocksDB version: 7.9.2 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Git sha 0 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: DB SUMMARY 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: DB Session ID: ZCTZZWYXB7RNCMNQ7WOG 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: CURRENT file: CURRENT 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-03-09T00:23:20.857 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000008.sst 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000009.log size: 86395 ; 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.error_if_exists: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.create_if_missing: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.paranoid_checks: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.env: 0x55891c78fdc0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.info_log: 0x55891d854700 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.statistics: (nil) 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.use_fsync: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_log_file_size: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.allow_fallocate: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.use_direct_reads: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.db_log_dir: 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.wal_dir: 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.write_buffer_manager: 0x55891d859900 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 
ceph-mon[50869]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.unordered_write: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.row_cache: None 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.wal_filter: None 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.two_write_queues: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.wal_compression: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.atomic_flush: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.log_readahead_size: 0 2026-03-09T00:23:20.858 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T00:23:20.858 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_background_jobs: 2 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_background_compactions: -1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_subcompactions: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_open_files: -1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_background_flushes: -1 2026-03-09T00:23:20.859 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Compression algorithms supported: 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kZSTD supported: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kXpressCompression supported: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kBZip2Compression supported: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kLZ4Compression supported: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kZlibCompression supported: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: kSnappyCompression supported: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000010 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.merge_operator: 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_filter: None 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55891d854640) 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: cache_index_and_filter_blocks: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T00:23:20.859 
INFO:journalctl@ceph.mon.a.vm03.stdout: pin_top_level_index_and_filter: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: index_type: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: data_block_index_type: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: index_shortening: 1 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: checksum: 4 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: no_block_cache: 0 2026-03-09T00:23:20.859 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache: 0x55891d879350 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache_name: BinnedLRUCache 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache_options: 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: capacity : 536870912 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: num_shard_bits : 4 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: strict_capacity_limit : 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: high_pri_pool_ratio: 0.000 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache_compressed: (nil) 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: persistent_cache: (nil) 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_size: 4096 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_size_deviation: 10 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_restart_interval: 16 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: index_block_restart_interval: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: metadata_block_size: 4096 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: partition_filters: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: use_delta_encoding: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: filter_policy: bloomfilter 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: whole_key_filtering: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: verify_compression: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: read_amp_bytes_per_bit: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: format_version: 5 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: enable_index_compression: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: block_align: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: max_auto_readahead_size: 262144 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: prepopulate_block_cache: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: initial_auto_readahead_size: 8192 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression: NoCompression 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.num_levels: 7 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T00:23:20.860 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T00:23:20.860 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.inplace_update_support: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.bloom_locality: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.max_successive_merges: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 
ceph-mon[50869]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.ttl: 2592000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.enable_blob_files: false 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.min_blob_size: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4818af7b-2362-4da7-9efc-54f40c346b2a 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015800831903, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: 
rocksdb: EVENT_LOG_v1 {"time_micros": 1773015800833585, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 83357, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 243, "table_properties": {"data_size": 81516, "index_size": 230, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 9991, "raw_average_key_size": 47, "raw_value_size": 75850, "raw_average_value_size": 361, "num_data_blocks": 10, "num_entries": 210, "num_filter_entries": 210, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773015800, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4818af7b-2362-4da7-9efc-54f40c346b2a", "db_session_id": "ZCTZZWYXB7RNCMNQ7WOG", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015800833673, "job": 1, "event": "recovery_finished"} 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55891d87ae00 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: DB pointer 0x55891d990000 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T00:23:20.861 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: ** DB Stats ** 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 
2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: ** Compaction Stats [default] ** 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: L0 2/0 83.26 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 56.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Sum 2/0 83.26 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 56.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 56.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: ** Compaction Stats [default] ** 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 56.0 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative compaction: 0.00 GB write, 6.46 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval compaction: 0.00 GB write, 6.46 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Stalls(count): 0 level0_slowdown, 0 
level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Block cache BinnedLRUCache@0x55891d879350#6 capacity: 512.00 MB usage: 1.19 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 9e-06 secs_since: 0 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.77 KB,0.000146031%) IndexBlock(2,0.42 KB,8.04663e-05%) Misc(1,0.00 KB,0%) 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: starting mon.a rank 0 at public addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] at bind addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???) e1 preinit fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).mds e1 new map 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).mds e1 print_map 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: e1 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: btime 2026-03-09T00:23:18:421119+0000 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: legacy client fscid: -1 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout: No filesystems configured 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T00:23:20.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-09T00:23:21.135 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mon.a is 
new leader, mons a in quorum (ranks 0) 2026-03-09T00:23:21.135 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: monmap epoch 1 2026-03-09T00:23:21.135 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:21.135 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: last_changed 2026-03-09T00:23:17.121128+0000 2026-03-09T00:23:21.135 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: created 2026-03-09T00:23:17.121128+0000 2026-03-09T00:23:21.136 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: min_mon_release 19 (squid) 2026-03-09T00:23:21.136 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: election_strategy: 1 2026-03-09T00:23:21.136 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-09T00:23:21.136 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: fsmap 2026-03-09T00:23:21.136 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: osdmap e1: 0 total, 0 up, 0 in 2026-03-09T00:23:21.136 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:20 vm03 ceph-mon[50869]: mgrmap e1: no daemons active 2026-03-09T00:23:21.146 INFO:teuthology.orchestra.run.vm03.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-09T00:23:21.147 INFO:teuthology.orchestra.run.vm03.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:21.147 INFO:teuthology.orchestra.run.vm03.stdout:Creating mgr... 2026-03-09T00:23:21.148 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-09T00:23:21.148 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8765 ... 2026-03-09T00:23:21.295 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x 2026-03-09T00:23:21.295 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x.service: Unit ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x.service not loaded. 2026-03-09T00:23:21.423 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86.target.wants/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x.service → /etc/systemd/system/ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@.service. 2026-03-09T00:23:21.442 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 systemd[1]: Starting Ceph mgr.x for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 2026-03-09T00:23:21.602 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T00:23:21.602 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available 2026-03-09T00:23:21.603 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T00:23:21.603 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[9283, 8765]>. firewalld.service is not available 2026-03-09T00:23:21.603 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr to start... 2026-03-09T00:23:21.603 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr... 
2026-03-09T00:23:21.693 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 podman[51091]: 2026-03-09 00:23:21.536080891 +0000 UTC m=+0.016832626 container create e5100866eb953ed86f7dfec12c03da5e6b8c7b956d80c7f0e8aa2d1880451d41 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.vendor=CentOS) 2026-03-09T00:23:21.693 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 podman[51091]: 2026-03-09 00:23:21.587170734 +0000 UTC m=+0.067922469 container init e5100866eb953ed86f7dfec12c03da5e6b8c7b956d80c7f0e8aa2d1880451d41 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:23:21.693 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 podman[51091]: 2026-03-09 00:23:21.591706705 +0000 UTC m=+0.072458429 container start e5100866eb953ed86f7dfec12c03da5e6b8c7b956d80c7f0e8aa2d1880451d41 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0) 2026-03-09T00:23:21.693 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 bash[51091]: e5100866eb953ed86f7dfec12c03da5e6b8c7b956d80c7f0e8aa2d1880451d41 2026-03-09T00:23:21.693 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 podman[51091]: 2026-03-09 00:23:21.528890292 +0000 UTC m=+0.009642037 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:23:21.693 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 systemd[1]: Started 
Ceph mgr.x for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86. 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86", 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a" 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T00:23:21.927 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T00:23:21.927 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T00:23:18:421119+0000", 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T00:23:18.421855+0000", 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:21.928 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (1/15)... 
2026-03-09T00:23:21.994 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:21.696+0000 7fe05c2bc140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:23:21.995 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:21 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:21.749+0000 7fe05c2bc140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:23:22.251 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:22.158+0000 7fe05c2bc140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:23:22.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:21 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2456161255' entity='client.admin' 2026-03-09T00:23:22.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:21 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2549353839' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:22.467+0000 7fe05c2bc140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: from numpy import show_config as show_numpy_config 2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:22.554+0000 7fe05c2bc140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:22.591+0000 7fe05c2bc140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:23:22.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:22 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:22.664+0000 7fe05c2bc140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:23:23.430 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.158+0000 7fe05c2bc140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:23:23.430 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.275+0000 7fe05c2bc140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:23:23.430 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.316+0000 7fe05c2bc140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:23:23.430 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.352+0000 7fe05c2bc140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:23:23.430 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.393+0000 7fe05c2bc140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:23:23.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.429+0000 7fe05c2bc140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:23:23.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.611+0000 7fe05c2bc140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:23:23.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.660+0000 7fe05c2bc140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:23:24.140 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:23 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:23.879+0000 7fe05c2bc140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86", 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T00:23:24.289 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-09T00:23:24.289 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a" 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 3, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 
2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T00:23:18:421119+0000", 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T00:23:18.421855+0000", 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:24.290 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (2/15)... 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:24 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/111588998' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.182+0000 7fe05c2bc140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.218+0000 7fe05c2bc140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.259+0000 7fe05c2bc140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.341+0000 7fe05c2bc140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.376+0000 7fe05c2bc140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:23:24.454 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.453+0000 7fe05c2bc140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:23:24.733 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.562+0000 7fe05c2bc140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:23:24.733 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.696+0000 7fe05c2bc140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:23:24.733 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:24 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:24.732+0000 7fe05c2bc140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: Activating manager daemon x 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: mgrmap e2: x(active, starting, since 0.00449945s) 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' 
entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: Manager daemon x is now available 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' 2026-03-09T00:23:25.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:25 vm03 ceph-mon[50869]: from='mgr.14100 192.168.123.103:0/2253192326' entity='mgr.x' 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86", 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a" 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:26.712 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 
2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T00:23:18:421119+0000", 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T00:23:26.713 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T00:23:26.714 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T00:23:18.421855+0000", 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:26.714 INFO:teuthology.orchestra.run.vm03.stdout:mgr is available 2026-03-09T00:23:26.829 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:26 vm03 ceph-mon[50869]: mgrmap e3: x(active, since 1.00861s) 2026-03-09T00:23:26.829 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:26 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2116598239' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-09T00:23:27.092 INFO:teuthology.orchestra.run.vm03.stdout:Enabling cephadm module... 
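[editor's note] The records above show the bootstrap assimilating the generated minimal ceph.conf ([global]/[mgr]/[osd] sections) into the monitor configuration database and then enabling the cephadm mgr module. A minimal sketch of the equivalent step, assuming the ceph CLI is on PATH and /etc/ceph/ceph.conf holds the conf shown above (this is not the actual cephadm bootstrap code):

    # Sketch: push a generated ceph.conf into the mon config DB, then enable
    # the cephadm orchestrator module, mirroring the "config assimilate-conf"
    # and "mgr module enable cephadm" commands dispatched in the log above.
    import subprocess

    def ceph(*args):
        """Run a ceph CLI command and return its stdout."""
        return subprocess.run(["ceph", *args], capture_output=True,
                              text=True, check=True).stdout

    print(ceph("config", "assimilate-conf", "-i", "/etc/ceph/ceph.conf"))
    ceph("mgr", "module", "enable", "cephadm")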
2026-03-09T00:23:28.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:27 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: ignoring --setuser ceph since I am not root 2026-03-09T00:23:28.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:27 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: ignoring --setgroup ceph since I am not root 2026-03-09T00:23:28.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:27 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:27.874+0000 7fabff86d140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:23:28.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:27 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:27.942+0000 7fabff86d140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:23:28.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:27 vm03 ceph-mon[50869]: mgrmap e4: x(active, since 2s) 2026-03-09T00:23:28.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:27 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/966023776' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-09T00:23:28.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:27 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/1817807375' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "x", 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-09T00:23:28.305 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 5... 2026-03-09T00:23:28.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:28.422+0000 7fabff86d140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:23:29.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:28 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/1817807375' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-09T00:23:29.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:28 vm03 ceph-mon[50869]: mgrmap e5: x(active, since 3s) 2026-03-09T00:23:29.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:28 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/694285774' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T00:23:29.251 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:28.766+0000 7fabff86d140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:23:29.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:23:29.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T00:23:29.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: from numpy import show_config as show_numpy_config 2026-03-09T00:23:29.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:28.853+0000 7fabff86d140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:23:29.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:28.889+0000 7fabff86d140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:23:29.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:28 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:28.963+0000 7fabff86d140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:23:29.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.471+0000 7fabff86d140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:23:29.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.589+0000 7fabff86d140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:23:29.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.633+0000 7fabff86d140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:23:29.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.671+0000 7fabff86d140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:23:29.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.713+0000 7fabff86d140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:23:30.199 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.751+0000 7fabff86d140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:23:30.199 
INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.921+0000 7fabff86d140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:23:30.199 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:29 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:29.973+0000 7fabff86d140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:23:30.486 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.198+0000 7fabff86d140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:23:30.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.485+0000 7fabff86d140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:23:30.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.523+0000 7fabff86d140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:23:30.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.565+0000 7fabff86d140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:23:30.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.645+0000 7fabff86d140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:23:30.751 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.688+0000 7fabff86d140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:23:31.039 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.772+0000 7fabff86d140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:23:31.039 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:30 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:30.894+0000 7fabff86d140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:23:31.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: Active manager daemon x restarted 2026-03-09T00:23:31.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: Activating manager daemon x 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: osdmap e2: 0 total, 0 up, 0 in 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: mgrmap e6: x(active, starting, since 0.00471886s) 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 
cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: Manager daemon x is now available 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:31 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:31 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:31.038+0000 7fabff86d140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:23:31.502 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:31 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:31.078+0000 7fabff86d140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:23:32.225 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:32.225 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-09T00:23:32.225 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-09T00:23:32.225 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:32.225 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 5 is available 2026-03-09T00:23:32.225 INFO:teuthology.orchestra.run.vm03.stdout:Setting orchestrator backend to cephadm... 2026-03-09T00:23:32.393 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:32 vm03 ceph-mon[50869]: Found migration_current of "None". Setting to last migration. 
2026-03-09T00:23:32.393 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:32 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:32.393 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:32 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:32.393 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:32 vm03 ceph-mon[50869]: mgrmap e7: x(active, since 1.00852s) 2026-03-09T00:23:32.393 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:32 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:32.978 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-09T00:23:32.978 INFO:teuthology.orchestra.run.vm03.stdout:Generating ssh key... 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: Generating public/private ed25519 key pair. 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: Your identification has been saved in /tmp/tmp62q64ejz/key 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: Your public key has been saved in /tmp/tmp62q64ejz/key.pub 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: The key fingerprint is: 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: SHA256:z6tgdjRJppP+oHMUxvyscD48wUsuKP+tmptPU7mBV1o ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: The key's randomart image is: 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: +--[ED25519 256]--+ 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | o E | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | .=X . | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | .oX+S | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | .+*=o+ | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | .oOB+. o | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: |. .+o*X= . | 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: | o*===.oo.. 
| 2026-03-09T00:23:33.231 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:33 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: +----[SHA256]-----+ 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:31] ENGINE Bus STARTING 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:32] ENGINE Serving on http://192.168.123.103:8765 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:32] ENGINE Serving on https://192.168.123.103:7150 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:32] ENGINE Bus STARTED 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:32] ENGINE Client ('192.168.123.103', 50872) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:33.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:33.487 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:33.487 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:33 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:33.757 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP6EpnwBvLaEHyx1NmGXJnMnrzX2VfDi0+xxoV5WJLmc ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:33.758 INFO:teuthology.orchestra.run.vm03.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-09T00:23:33.758 INFO:teuthology.orchestra.run.vm03.stdout:Adding key to root@localhost authorized_keys... 2026-03-09T00:23:33.758 INFO:teuthology.orchestra.run.vm03.stdout:Adding host vm03... 2026-03-09T00:23:34.619 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:34 vm03 ceph-mon[50869]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:34.619 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:34 vm03 ceph-mon[50869]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:34.619 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:34 vm03 ceph-mon[50869]: Generating ssh key... 
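[editor's note] The records above cover the cluster SSH key workflow: "ceph cephadm generate-key" creates the ed25519 pair, "ceph cephadm get-pub-key" returns the public half (written to /home/ubuntu/cephtest/ceph.pub), the key is appended to root's authorized_keys, and the host is registered with "orch host add". A minimal sketch of that sequence, assuming it runs as root and reuses the hostname/address from this log:

    # Sketch: fetch the cluster SSH public key and register a host with the
    # orchestrator, as the bootstrap does above for vm03.
    import subprocess

    def ceph(*args):
        return subprocess.run(["ceph", *args], capture_output=True,
                              text=True, check=True).stdout

    pub_key = ceph("cephadm", "get-pub-key").strip()
    with open("/root/.ssh/authorized_keys", "a") as f:
        f.write(pub_key + "\n")

    print(ceph("orch", "host", "add", "vm03", "192.168.123.103"))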
2026-03-09T00:23:34.619 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:34 vm03 ceph-mon[50869]: mgrmap e8: x(active, since 2s) 2026-03-09T00:23:34.619 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:34 vm03 ceph-mon[50869]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:35.479 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:35 vm03 ceph-mon[50869]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "addr": "192.168.123.103", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:35.479 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:35 vm03 ceph-mon[50869]: Deploying cephadm binary to vm03 2026-03-09T00:23:35.643 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Added host 'vm03' with addr '192.168.123.103' 2026-03-09T00:23:35.643 INFO:teuthology.orchestra.run.vm03.stdout:Deploying unmanaged mon service... 2026-03-09T00:23:36.061 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-09T00:23:36.061 INFO:teuthology.orchestra.run.vm03.stdout:Deploying unmanaged mgr service... 2026-03-09T00:23:36.440 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-03-09T00:23:36.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:36 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:36.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:36 vm03 ceph-mon[50869]: Added host vm03 2026-03-09T00:23:36.821 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:36 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:36.822 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:36 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:36.822 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:36 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:37.374 INFO:teuthology.orchestra.run.vm03.stdout:Enabling the dashboard module... 2026-03-09T00:23:38.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:38.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: Saving service mon spec with placement count:5 2026-03-09T00:23:38.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:38.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: Saving service mgr spec with placement count:2 2026-03-09T00:23:38.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2859526464' entity='client.admin' 2026-03-09T00:23:38.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/1033620583' entity='client.admin' 2026-03-09T00:23:38.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:38.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:38.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:37 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3276965622' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-09T00:23:38.803 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:38 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: ignoring --setuser ceph since I am not root 2026-03-09T00:23:38.804 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:38 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: ignoring --setgroup ceph since I am not root 2026-03-09T00:23:38.804 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:38 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:38.738+0000 7fb71df78140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:23:39.167 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:38 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:39.167 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:38 vm03 ceph-mon[50869]: from='mgr.14118 192.168.123.103:0/3308723832' entity='mgr.x' 2026-03-09T00:23:39.167 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:38 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3276965622' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-09T00:23:39.167 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:38 vm03 ceph-mon[50869]: mgrmap e9: x(active, since 7s) 2026-03-09T00:23:39.167 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:38 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:38.811+0000 7fb71df78140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "x", 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-09T00:23:39.179 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 9... 
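[editor's note] The mon records above show the bootstrap saving unmanaged mon and mgr service specs (so cephadm will not reschedule the bootstrap daemons) and then enabling the dashboard module, which triggers the second mgr restart being waited on here. A hedged sketch of those three commands, assuming the ceph CLI is on PATH:

    # Sketch: declare mon and mgr as unmanaged services and enable the
    # dashboard module, mirroring the "orch apply ... unmanaged" and
    # "mgr module enable dashboard" requests dispatched to the mon above.
    import subprocess

    for cmd in (
        ["ceph", "orch", "apply", "mon", "--unmanaged"],
        ["ceph", "orch", "apply", "mgr", "--unmanaged"],
        ["ceph", "mgr", "module", "enable", "dashboard"],
    ):
        subprocess.run(cmd, check=True)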
2026-03-09T00:23:39.501 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:39.290+0000 7fb71df78140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:39.621+0000 7fb71df78140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: from numpy import show_config as show_numpy_config 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:39.711+0000 7fb71df78140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:39.749+0000 7fb71df78140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:23:39.939 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:39 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:39.820+0000 7fb71df78140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:23:40.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:39 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/2333507539' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T00:23:40.572 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.309+0000 7fb71df78140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:23:40.572 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.418+0000 7fb71df78140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:23:40.572 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.458+0000 7fb71df78140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:23:40.572 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.492+0000 7fb71df78140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:23:40.572 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.534+0000 7fb71df78140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:23:41.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.571+0000 7fb71df78140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:23:41.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.744+0000 7fb71df78140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:23:41.001 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:40 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:40.796+0000 7fb71df78140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:23:41.290 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.017+0000 7fb71df78140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:23:41.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.289+0000 7fb71df78140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:23:41.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.326+0000 7fb71df78140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:23:41.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.367+0000 7fb71df78140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:23:41.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.441+0000 7fb71df78140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:23:41.555 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.477+0000 7fb71df78140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:23:41.838 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 
00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.554+0000 7fb71df78140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:23:41.838 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.664+0000 7fb71df78140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:23:41.838 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.801+0000 7fb71df78140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:23:42.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: Active manager daemon x restarted 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: Activating manager daemon x 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: osdmap e3: 0 total, 0 up, 0 in 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: mgrmap e10: x(active, starting, since 0.00388174s) 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: Manager daemon x is now available 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:41 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:23:42.252 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:23:41 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:23:41.837+0000 7fb71df78140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:23:42.991 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T00:23:42.991 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-09T00:23:42.991 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-09T00:23:42.991 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T00:23:42.991 
INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 9 is available 2026-03-09T00:23:42.992 INFO:teuthology.orchestra.run.vm03.stdout:Generating a dashboard self-signed certificate... 2026-03-09T00:23:43.113 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:42 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:23:43.113 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:42 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:43.113 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:42 vm03 ceph-mon[50869]: mgrmap e11: x(active, since 1.00665s) 2026-03-09T00:23:43.113 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:42 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:43.454 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-09T00:23:43.454 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial admin user... 2026-03-09T00:23:43.969 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$K4Kz5T2552hZLi5JE0wcm.eKw8PJojRXFaeid4y5CRe.9MVQl.xem", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773015823, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-09T00:23:43.969 INFO:teuthology.orchestra.run.vm03.stdout:Fetching dashboard port number... 2026-03-09T00:23:44.337 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 8443 2026-03-09T00:23:44.337 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T00:23:44.337 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout:Ceph Dashboard is now available at: 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout: URL: https://vm03.local:8443/ 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout: User: admin 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout: Password: 8jbf4rc7e3 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:42] ENGINE Bus STARTING 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:42] ENGINE Serving on http://192.168.123.103:8765 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:42] ENGINE Serving on https://192.168.123.103:7150 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:42] ENGINE Bus STARTED 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: [09/Mar/2026:00:23:42] ENGINE Client ('192.168.123.103', 40940) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='client.14162 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:44.340 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:44 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/3582992113' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-09T00:23:44.340 INFO:teuthology.orchestra.run.vm03.stdout:Saving cluster configuration to /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/config directory 2026-03-09T00:23:44.775 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout:Or, if you are only running a single cluster on this host: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: ceph telemetry on 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout:For more information see: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:44.776 INFO:teuthology.orchestra.run.vm03.stdout:Bootstrap complete. 2026-03-09T00:23:44.806 INFO:tasks.cephadm:Fetching config... 2026-03-09T00:23:44.806 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:44.806 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-09T00:23:44.824 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-09T00:23:44.825 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:44.825 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-09T00:23:44.886 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-09T00:23:44.886 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:44.886 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/keyring of=/dev/stdout 2026-03-09T00:23:44.957 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-09T00:23:44.957 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:44.957 DEBUG:teuthology.orchestra.run.vm03:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-09T00:23:45.019 INFO:tasks.cephadm:Installing pub ssh key for root users... 
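[editor's note] Bootstrap is complete at this point and the task switches to running admin commands through the cephadm shell wrapper, the same "cephadm shell --fsid ... -c ceph.conf -k admin keyring -- ceph ..." pattern visible in the next records. A small illustrative helper (an assumption, not teuthology's own API) wrapping that invocation:

    # Sketch: run "ceph" commands inside a cephadm shell container against the
    # bootstrapped cluster, using the fsid and paths from this run's log.
    import subprocess

    FSID = "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86"
    CEPHADM = "/home/ubuntu/cephtest/cephadm"

    def cephadm_ceph(*args):
        cmd = ["sudo", CEPHADM, "shell",
               "-c", "/etc/ceph/ceph.conf",
               "-k", "/etc/ceph/ceph.client.admin.keyring",
               "--fsid", FSID, "--", "ceph", *args]
        return subprocess.run(cmd, capture_output=True, text=True,
                              check=True).stdout

    # e.g. the allow_ptrace setting applied in the next step of the log:
    print(cephadm_ceph("config", "set", "mgr", "mgr/cephadm/allow_ptrace", "true"))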
2026-03-09T00:23:45.019 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP6EpnwBvLaEHyx1NmGXJnMnrzX2VfDi0+xxoV5WJLmc ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T00:23:45.120 INFO:teuthology.orchestra.run.vm03.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP6EpnwBvLaEHyx1NmGXJnMnrzX2VfDi0+xxoV5WJLmc ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:45.138 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP6EpnwBvLaEHyx1NmGXJnMnrzX2VfDi0+xxoV5WJLmc ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T00:23:45.176 INFO:teuthology.orchestra.run.vm06.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP6EpnwBvLaEHyx1NmGXJnMnrzX2VfDi0+xxoV5WJLmc ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:45.186 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-09T00:23:45.383 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:45.752 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:45 vm03 ceph-mon[50869]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:45.752 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:45 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/1543499215' entity='client.admin' 2026-03-09T00:23:45.752 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:45 vm03 ceph-mon[50869]: mgrmap e12: x(active, since 2s) 2026-03-09T00:23:45.870 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-09T00:23:45.870 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-09T00:23:46.167 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:46.642 INFO:tasks.cephadm:Remote vm06 excluded from cephadm cluster by role 2026-03-09T00:23:46.642 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-09T00:23:46.642 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd crush tunables default 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/1430980761' entity='client.admin' 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:46.753 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:46 vm03 ceph-mon[50869]: Updating vm03:/etc/ceph/ceph.conf 2026-03-09T00:23:46.910 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:47.684 INFO:teuthology.orchestra.run.vm03.stderr:adjusted tunables profile to default 2026-03-09T00:23:47.866 INFO:tasks.cephadm:Adding mon.a on vm03 2026-03-09T00:23:47.866 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch apply mon '1;vm03:192.168.123.103=a' 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: Updating vm03:/var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/config/ceph.conf 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/335179382' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: Updating vm03:/var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/config/ceph.client.admin.keyring 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:47 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.037 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:48.290 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mon update... 2026-03-09T00:23:48.453 INFO:tasks.cephadm:Waiting for 1 mons in monmap... 2026-03-09T00:23:48.453 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph mon dump -f json 2026-03-09T00:23:48.697 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/335179382' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "1;vm03:192.168.123.103=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: Saving service mon spec with placement vm03:192.168.123.103=a;count:1 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' 
entity='mgr.x' 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.698 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:48.780 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:49.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:49.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: Reconfiguring mon.a (unknown last config time)... 2026-03-09T00:23:49.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:23:49.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:23:49.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:49.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:48 vm03 ceph-mon[50869]: Reconfiguring daemon mon.a on vm03 2026-03-09T00:23:49.045 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:49.046 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86","modified":"2026-03-09T00:23:17.121128Z","created":"2026-03-09T00:23:17.121128Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T00:23:49.046 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1 2026-03-09T00:23:49.239 INFO:tasks.cephadm:Generating final ceph.conf file... 
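The "Waiting for 1 mons in monmap" step resolves by parsing the ceph mon dump -f json output shown above and counting the entries in its mons array. A minimal sketch of that check, assuming the JSON has already been captured (the polling loop and the exact teuthology helper are omitted):

    # Sketch: count monitors in a monmap dumped with `ceph mon dump -f json`.
    # With the epoch-1 monmap above (one mon "a", quorum [0]) this returns 1.
    import json

    def mons_in_monmap(mon_dump_output):
        monmap = json.loads(mon_dump_output)
        return len(monmap.get("mons", []))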
2026-03-09T00:23:49.239 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph config generate-minimal-conf 2026-03-09T00:23:49.415 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:49.652 INFO:teuthology.orchestra.run.vm03.stdout:# minimal ceph.conf for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:49.652 INFO:teuthology.orchestra.run.vm03.stdout:[global] 2026-03-09T00:23:49.652 INFO:teuthology.orchestra.run.vm03.stdout: fsid = 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:23:49.653 INFO:teuthology.orchestra.run.vm03.stdout: mon_host = [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] 2026-03-09T00:23:49.704 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:49 vm03 ceph-mon[50869]: mgrmap e13: x(active, since 6s) 2026-03-09T00:23:49.704 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:49 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:49.704 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:49 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:49.704 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:49 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3087957029' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T00:23:49.704 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:49 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/694358741' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:49.810 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-09T00:23:49.811 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:49.811 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T00:23:49.838 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:49.838 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:49.903 INFO:tasks.cephadm:Adding mgr.x on vm03 2026-03-09T00:23:49.903 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch apply mgr '1;vm03=x' 2026-03-09T00:23:50.110 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:50.364 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mgr update... 2026-03-09T00:23:50.634 INFO:tasks.cephadm:Deploying OSDs... 2026-03-09T00:23:50.634 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:23:50.634 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T00:23:50.665 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:23:50.666 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d? 
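The dd if=/scratch_devs read above exits non-zero (there is no scratch-device list on this VPS), so the task falls back to globbing /dev/[sv]d? and, as the next lines show, discards the root disk before treating the remaining devices as OSD candidates. A rough local sketch of that fallback, assuming the root device is already known:

    # Sketch: device discovery fallback. Prefer a /scratch_devs list when it
    # exists; otherwise glob SCSI/virtio disks and drop the root device.
    import glob
    import os

    def candidate_devices(root_dev):
        if os.path.exists("/scratch_devs"):
            with open("/scratch_devs") as f:
                devs = f.read().split()
        else:
            devs = sorted(glob.glob("/dev/[sv]d?"))
        return [d for d in devs if d != root_dev]

    # On vm03 this would yield ['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'].
    print(candidate_devices("/dev/vda"))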
2026-03-09T00:23:50.739 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda 2026-03-09T00:23:50.739 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb 2026-03-09T00:23:50.739 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc 2026-03-09T00:23:50.739 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd 2026-03-09T00:23:50.739 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde 2026-03-09T00:23:50.739 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T00:23:50.739 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T00:23:50.739 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 00:23:46.997400806 +0000 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 00:21:32.462966993 +0000 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 00:21:32.462966993 +0000 2026-03-09T00:23:50.800 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 00:17:10.311000000 +0000 2026-03-09T00:23:50.800 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T00:23:50.874 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T00:23:50.874 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T00:23:50.874 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 9.8544e-05 s, 5.2 MB/s 2026-03-09T00:23:50.875 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T00:23:50.935 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 245 Links: 1 Device type: fc,20 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 00:23:47.084400913 +0000 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 00:21:32.465966996 +0000 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 00:21:32.465966996 +0000 2026-03-09T00:23:50.994 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 00:17:10.321000000 +0000 2026-03-09T00:23:50.994 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T00:23:51.060 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T00:23:51.061 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T00:23:51.061 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000180589 s, 2.8 MB/s 2026-03-09T00:23:51.062 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T00:23:51.118 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 00:23:47.172401020 +0000 2026-03-09T00:23:51.172 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 00:21:32.510967039 +0000 2026-03-09T00:23:51.173 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 00:21:32.510967039 +0000 2026-03-09T00:23:51.173 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 00:17:10.326000000 +0000 2026-03-09T00:23:51.173 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T00:23:51.234 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T00:23:51.234 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T00:23:51.234 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.00011767 s, 4.4 MB/s 2026-03-09T00:23:51.235 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T00:23:51.290 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde 2026-03-09T00:23:51.345 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde 2026-03-09T00:23:51.345 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T00:23:51.345 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T00:23:51.345 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T00:23:51.345 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T00:23:51.346 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 00:23:47.239401102 +0000 2026-03-09T00:23:51.346 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 00:21:32.475967006 +0000 2026-03-09T00:23:51.346 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 00:21:32.475967006 +0000 2026-03-09T00:23:51.346 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 00:17:10.330000000 +0000 2026-03-09T00:23:51.346 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T00:23:51.407 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T00:23:51.408 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T00:23:51.408 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000107441 s, 4.8 MB/s 2026-03-09T00:23:51.409 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T00:23:51.466 INFO:tasks.cephadm:Deploying osd.0 on vm03 with /dev/vde... 2026-03-09T00:23:51.466 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- lvm zap /dev/vde 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "1;vm03=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: Saving service mgr spec with placement vm03=x;count:1 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:51.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' 
entity='mgr.x' 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: Reconfiguring mgr.x (unknown last config time)... 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: Reconfiguring daemon mgr.x on vm03 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:51.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:51 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:23:51.674 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:52.669 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:23:52.687 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch daemon add osd vm03:/dev/vde 2026-03-09T00:23:52.859 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:23:53.158 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:53 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T00:23:53.158 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:53 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T00:23:53.158 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:53 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:54.419 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:54 vm03 ceph-mon[50869]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:54.419 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:54 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3579738303' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5ad1f957-0222-41fb-9fa0-cddd8b39e373"}]: dispatch 2026-03-09T00:23:54.419 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:54 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/3579738303' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5ad1f957-0222-41fb-9fa0-cddd8b39e373"}]': finished 2026-03-09T00:23:54.419 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:54 vm03 ceph-mon[50869]: osdmap e5: 1 total, 0 up, 1 in 2026-03-09T00:23:54.419 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:54 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:23:55.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:55 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2833384953' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T00:23:58.398 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:58 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:23:58.398 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:58 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:59.479 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:23:59 vm03 ceph-mon[50869]: Deploying daemon osd.0 on vm03 2026-03-09T00:24:00.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:00 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:00.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:00 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:00.859 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:00 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:01.876 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 0 on host 'vm03' 2026-03-09T00:24:02.032 DEBUG:teuthology.orchestra.run.vm03:osd.0> sudo journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@osd.0.service 2026-03-09T00:24:02.034 INFO:tasks.cephadm:Deploying osd.1 on vm03 with /dev/vdd... 
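Each OSD above is created with two cephadm invocations against the same device: a ceph-volume lvm zap to wipe it, then ceph orch daemon add osd <host>:<device> inside a cephadm shell. A condensed sketch of that per-device loop, run on the bootstrap host; the image and fsid are the ones used throughout this log, the subprocess wrapper is an assumption rather than teuthology's actual code:

    # Sketch: zap a device and hand it to the orchestrator as an OSD,
    # mirroring the commands the task issues above for /dev/vde and /dev/vdd.
    import subprocess

    IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"
    FSID = "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86"
    BASE = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE]
    KEYS = ["-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring", "--fsid", FSID]

    def deploy_osd(host, dev):
        subprocess.run(BASE + ["ceph-volume"] + KEYS + ["--", "lvm", "zap", dev],
                       check=True)
        subprocess.run(BASE + ["shell"] + KEYS +
                       ["--", "ceph", "orch", "daemon", "add", "osd", f"{host}:{dev}"],
                       check=True)

    for dev in ("/dev/vde", "/dev/vdd"):
        deploy_osd("vm03", dev)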
2026-03-09T00:24:02.034 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- lvm zap /dev/vdd 2026-03-09T00:24:02.332 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:24:02 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0[63478]: 2026-03-09T00:24:02.252+0000 7f4f3a0dd740 -1 osd.0 0 log_to_monitors true 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:02.332 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:02 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:02.369 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:03.410 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='osd.0 [v2:192.168.123.103:6802/1273528910,v1:192.168.123.103:6803/1273528910]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": 
"auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:03.411 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:03 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:03.937 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:03.960 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch daemon add osd vm03:/dev/vdd 2026-03-09T00:24:04.126 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:04.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: Detected new or changed devices on vm03 2026-03-09T00:24:04.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: Adjusting osd_memory_target on vm03 to 257.0M 2026-03-09T00:24:04.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: Unable to set osd_memory_target on vm03 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-09T00:24:04.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: from='osd.0 [v2:192.168.123.103:6802/1273528910,v1:192.168.123.103:6803/1273528910]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T00:24:04.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: osdmap e6: 1 total, 0 up, 1 in 2026-03-09T00:24:04.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: from='osd.0 [v2:192.168.123.103:6802/1273528910,v1:192.168.123.103:6803/1273528910]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-09T00:24:04.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:04 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:24:05.196 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:24:05 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0[63478]: 2026-03-09T00:24:05.126+0000 7f4f3605e640 -1 osd.0 0 waiting for initial osdmap 2026-03-09T00:24:05.196 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:24:05 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0[63478]: 2026-03-09T00:24:05.136+0000 7f4f31687640 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:24:05.485 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='osd.0 [v2:192.168.123.103:6802/1273528910,v1:192.168.123.103:6803/1273528910]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: osdmap e7: 1 total, 0 up, 1 in 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 
2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='client.14193 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:05.486 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:05 vm03 ceph-mon[50869]: from='osd.0 [v2:192.168.123.103:6802/1273528910,v1:192.168.123.103:6803/1273528910]' entity='osd.0' 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: purged_snaps scrub starts 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: purged_snaps scrub ok 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2704939431' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0ebc951a-85ad-4c95-abb8-da144761d783"}]: dispatch 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2704939431' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0ebc951a-85ad-4c95-abb8-da144761d783"}]': finished 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: osd.0 [v2:192.168.123.103:6802/1273528910,v1:192.168.123.103:6803/1273528910] boot 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: osdmap e8: 2 total, 1 up, 2 in 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:06.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:06 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/1938151747' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T00:24:07.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:07 vm03 ceph-mon[50869]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T00:24:07.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:07 vm03 ceph-mon[50869]: osdmap e9: 2 total, 1 up, 2 in 2026-03-09T00:24:07.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:07 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:09.488 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:09 vm03 ceph-mon[50869]: pgmap v12: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail 2026-03-09T00:24:10.666 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:10 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:24:10.666 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:10 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:11.675 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:11 vm03 ceph-mon[50869]: pgmap v13: 0 pgs: ; 0 B data, 426 MiB used, 20 GiB / 20 GiB avail 2026-03-09T00:24:11.676 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:11 vm03 ceph-mon[50869]: Deploying daemon osd.1 on vm03 2026-03-09T00:24:12.627 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:12 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:12.627 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:12 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:12.627 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:12 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:13.659 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:13 vm03 ceph-mon[50869]: pgmap v14: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T00:24:13.659 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:13 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:13.659 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:13 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:13.659 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:13 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:13.659 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:13 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:13.659 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:13 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:13.660 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 1 on host 'vm03' 2026-03-09T00:24:13.862 DEBUG:teuthology.orchestra.run.vm03:osd.1> sudo journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@osd.1.service 2026-03-09T00:24:13.864 INFO:tasks.cephadm:Waiting for 2 OSDs to come up... 
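"Waiting for 2 OSDs to come up" is a poll of ceph osd stat -f json, issued through the same cephadm shell as the other commands, until num_up_osds reaches the expected count, as the repeated osd stat calls below show. A minimal sketch of that loop; run_ceph is a stand-in for the shell invocation, not teuthology's actual helper:

    # Sketch: poll `ceph osd stat -f json` until the expected OSDs are up.
    import json
    import subprocess
    import time

    def run_ceph(*args):
        cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "shell",
               "--fsid", "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86", "--", "ceph", *args]
        return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout

    def wait_for_osds(expected, interval=1.0):
        while True:
            stat = json.loads(run_ceph("osd", "stat", "-f", "json"))
            # e.g. {"epoch":9,"num_osds":2,"num_up_osds":1,...}
            if stat["num_up_osds"] >= expected:
                return
            time.sleep(interval)

    wait_for_osds(2)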
2026-03-09T00:24:13.864 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd stat -f json 2026-03-09T00:24:14.188 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:14.464 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:14.478 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:14 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:14.478 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:14 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:14.478 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:14 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:14.478 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:14 vm03 ceph-mon[50869]: from='osd.1 [v2:192.168.123.103:6810/3069294450,v1:192.168.123.103:6811/3069294450]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T00:24:14.631 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":9,"num_osds":2,"num_up_osds":1,"osd_up_since":1773015845,"num_in_osds":2,"osd_in_since":1773015845,"num_remapped_pgs":0} 2026-03-09T00:24:15.633 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd stat -f json 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: pgmap v15: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/266427533' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='osd.1 [v2:192.168.123.103:6810/3069294450,v1:192.168.123.103:6811/3069294450]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='osd.1 [v2:192.168.123.103:6810/3069294450,v1:192.168.123.103:6811/3069294450]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:15.663 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:15 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:15.823 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:16.096 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:16.259 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":11,"num_osds":2,"num_up_osds":1,"osd_up_since":1773015845,"num_in_osds":2,"osd_in_since":1773015845,"num_remapped_pgs":0} 2026-03-09T00:24:16.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: Detected new or changed devices on vm03 2026-03-09T00:24:16.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: Adjusting osd_memory_target on vm03 to 128.5M 2026-03-09T00:24:16.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: Unable to set osd_memory_target on vm03 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-09T00:24:16.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: from='osd.1 
[v2:192.168.123.103:6810/3069294450,v1:192.168.123.103:6811/3069294450]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-09T00:24:16.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: osdmap e11: 2 total, 1 up, 2 in 2026-03-09T00:24:16.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:16.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:16.502 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:16 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/210627163' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T00:24:17.002 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:24:16 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1[68677]: 2026-03-09T00:24:16.532+0000 7f565e9f7640 -1 osd.1 0 waiting for initial osdmap 2026-03-09T00:24:17.002 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:24:16 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1[68677]: 2026-03-09T00:24:16.536+0000 7f565a020640 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:24:17.259 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd stat -f json 2026-03-09T00:24:17.438 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:17.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:17 vm03 ceph-mon[50869]: purged_snaps scrub starts 2026-03-09T00:24:17.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:17 vm03 ceph-mon[50869]: purged_snaps scrub ok 2026-03-09T00:24:17.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:17 vm03 ceph-mon[50869]: pgmap v18: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T00:24:17.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:17 vm03 ceph-mon[50869]: OSD bench result of 49529.541994 IOPS is not within the threshold limit range of 50.000000 IOPS and 49000.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
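The "OSD bench result ... is not within the threshold limit range" entry above is the mclock capacity self-check: the startup bench measured about 49529.5 IOPS on osd.1, which falls outside the accepted 50–49000 IOPS window quoted in the message, so the IOPS capacity is left at 315 and the OSD suggests benchmarking with an external tool (e.g. fio) and overriding osd_mclock_max_capacity_iops_[hdd|ssd] by hand. The acceptance check, in numbers:

    # Worked numbers from the message above: the measured bench IOPS must fall
    # inside [lower, threshold] for the OSD to adopt it as its mclock capacity.
    measured = 49529.541994    # osd.1 startup bench result
    lower = 50.0               # lower bound quoted in the message
    threshold = 49000.0        # upper bound quoted in the message
    print(lower <= measured <= threshold)   # False -> capacity stays at 315 IOPS

The nearby "Unable to set osd_memory_target" entries are a separate, non-fatal artifact of running on a small VPS: the per-OSD targets cephadm tries to set (269536460 and 134768230 bytes, roughly 257 MiB and 128.5 MiB) sit below the option's 939524096-byte (896 MiB) minimum, so those config sets are simply refused.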
2026-03-09T00:24:17.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:17 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:17.669 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:17.879 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":12,"num_osds":2,"num_up_osds":2,"osd_up_since":1773015857,"num_in_osds":2,"osd_in_since":1773015845,"num_remapped_pgs":0} 2026-03-09T00:24:17.879 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd dump --format=json 2026-03-09T00:24:18.048 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:18.286 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:18.286 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":12,"fsid":"06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86","created":"2026-03-09T00:23:18.421455+0000","modified":"2026-03-09T00:24:17.393908+0000","last_up_change":"2026-03-09T00:24:17.393908+0000","last_in_change":"2026-03-09T00:24:05.374045+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"5ad1f957-0222-41fb-9fa0-cddd8b39e373","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6803","nonce":1273528910}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6805","nonce":1273528910}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6809","nonce":1273528910}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6807","nonce":1273528910}]},"public_addr":"192.168.123.103:6803/1273528910","cluster_addr":"192.168.123.103:6805/1273528910","heartbeat_back_addr":"192.168.123.103:6809/1273528910","heartbeat_front_addr":"192.168.123.103:6807/1273528910","state":["exists","up"]},{"osd":1,"uuid":"0ebc951a-85ad-4c95-abb8-da144761d783","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6811","nonce":3069294450}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6813","nonce":3069294450}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6817","nonce":3069294450}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","no
nce":3069294450},{"type":"v1","addr":"192.168.123.103:6815","nonce":3069294450}]},"public_addr":"192.168.123.103:6811/3069294450","cluster_addr":"192.168.123.103:6813/3069294450","heartbeat_back_addr":"192.168.123.103:6817/3069294450","heartbeat_front_addr":"192.168.123.103:6815/3069294450","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T00:24:03.241762+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"0.000000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:6801/501559367":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/2913968064":"2026-03-10T00:23:41.840491+0000","192.168.123.103:6800/501559367":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/1433105058":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/1181699057":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/3392588079":"2026-03-10T00:23:41.840491+0000","192.168.123.103:6801/1146781988":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/3373468643":"2026-03-10T00:23:31.080159+0000","192.168.123.103:6800/1146781988":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/2171914270":"2026-03-10T00:23:31.080159+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T00:24:18.406 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:18 vm03 ceph-mon[50869]: osd.1 [v2:192.168.123.103:6810/3069294450,v1:192.168.123.103:6811/3069294450] boot 2026-03-09T00:24:18.447 INFO:tasks.cephadm.ceph_manager.ceph:[] 2026-03-09T00:24:18.448 INFO:tasks.cephadm:Setting up client nodes... 2026-03-09T00:24:18.448 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-09T00:24:18.629 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:18.676 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:18 vm03 ceph-mon[50869]: osdmap e12: 2 total, 2 up, 2 in 2026-03-09T00:24:18.676 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:18 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:24:18.676 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:18 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/4065177455' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T00:24:18.676 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:18 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/1775504072' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T00:24:18.915 INFO:teuthology.orchestra.run.vm03.stdout:[client.0] 2026-03-09T00:24:18.915 INFO:teuthology.orchestra.run.vm03.stdout: key = AQAyE65pUBZsNhAAoa1WAgIerZIviXSYkXUmyA== 2026-03-09T00:24:19.074 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T00:24:19.074 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-09T00:24:19.074 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-09T00:24:19.113 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-09T00:24:19.113 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-09T00:24:19.113 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph mgr dump --format=json 2026-03-09T00:24:19.333 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:19.450 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:19 vm03 ceph-mon[50869]: pgmap v20: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:19.450 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:19 vm03 ceph-mon[50869]: osdmap e13: 2 total, 2 up, 2 in 2026-03-09T00:24:19.450 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:19 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2895469422' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T00:24:19.450 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:19 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/2895469422' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T00:24:19.594 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:19.771 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"flags":0,"active_gid":14150,"active_name":"x","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":1532573830},{"type":"v1","addr":"192.168.123.103:6801","nonce":1532573830}]},"active_addr":"192.168.123.103:6801/1532573830","active_change":"2026-03-09T00:23:41.840584+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[],"modules":["cephadm","dashboard","iostat","nfs","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to 
authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.103:8443/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":3,"active_clients":[{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":1320558990}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":3244260308}]},{"name":"volumes","addrvec":[{"type":"v2","addr":
"192.168.123.103:0","nonce":3540533844}]}]} 2026-03-09T00:24:19.772 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-09T00:24:19.772 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-09T00:24:19.772 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd dump --format=json 2026-03-09T00:24:19.940 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:20.182 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:20.182 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"fsid":"06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86","created":"2026-03-09T00:23:18.421455+0000","modified":"2026-03-09T00:24:18.401278+0000","last_up_change":"2026-03-09T00:24:17.393908+0000","last_in_change":"2026-03-09T00:24:05.374045+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"5ad1f957-0222-41fb-9fa0-cddd8b39e373","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6803","nonce":1273528910}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6805","nonce":1273528910}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6809","nonce":1273528910}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6807","nonce":1273528910}]},"public_addr":"192.168.123.103:6803/1273528910","cluster_addr":"192.168.123.103:6805/1273528910","heartbeat_back_addr":"192.168.123.103:6809/1273528910","heartbeat_front_addr":"192.168.123.103:6807/1273528910","state":["exists","up"]},{"osd":1,"uuid":"0ebc951a-85ad-4c95-abb8-da144761d783","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6811","nonce":3069294450}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6813","nonce":3069294450}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6817","nonce":3069294450}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6815","nonce":3069294450}]},"public_addr":"192.168.123.103:6811/3069294450","cluster_addr":"192.168.123.103:6813/3069294450","heartbeat_back_addr":"192.168.123.103:6817/3069294450","heartbeat_front_addr":"192.168.123.103:68
15/3069294450","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T00:24:03.241762+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T00:24:14.809742+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:6801/501559367":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/2913968064":"2026-03-10T00:23:41.840491+0000","192.168.123.103:6800/501559367":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/1433105058":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/1181699057":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/3392588079":"2026-03-10T00:23:41.840491+0000","192.168.123.103:6801/1146781988":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/3373468643":"2026-03-10T00:23:31.080159+0000","192.168.123.103:6800/1146781988":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/2171914270":"2026-03-10T00:23:31.080159+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T00:24:20.427 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-09T00:24:20.427 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd dump --format=json 2026-03-09T00:24:20.608 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:20.634 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:20 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2684886241' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T00:24:20.634 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:20 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/2485975440' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T00:24:20.849 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:20.849 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"fsid":"06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86","created":"2026-03-09T00:23:18.421455+0000","modified":"2026-03-09T00:24:18.401278+0000","last_up_change":"2026-03-09T00:24:17.393908+0000","last_in_change":"2026-03-09T00:24:05.374045+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"5ad1f957-0222-41fb-9fa0-cddd8b39e373","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6803","nonce":1273528910}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6805","nonce":1273528910}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6809","nonce":1273528910}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":1273528910},{"type":"v1","addr":"192.168.123.103:6807","nonce":1273528910}]},"public_addr":"192.168.123.103:6803/1273528910","cluster_addr":"192.168.123.103:6805/1273528910","heartbeat_back_addr":"192.168.123.103:6809/1273528910","heartbeat_front_addr":"192.168.123.103:6807/1273528910","state":["exists","up"]},{"osd":1,"uuid":"0ebc951a-85ad-4c95-abb8-da144761d783","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6811","nonce":3069294450}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6813","nonce":3069294450}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6817","nonce":3069294450}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3069294450},{"type":"v1","addr":"192.168.123.103:6815","nonce":3069294450}]},"public_addr":"192.168.123.103:6811/3069294450","cluster_addr":"192.168.123.103:6813/3069294450","heartbeat_back_addr":"192.168.123.103:6817/3069294450","heartbeat_front_addr":"192.168.123.103:6815/3069294450","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T00:24:03.241762+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T00:24:14.809742+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_
primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:6801/501559367":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/2913968064":"2026-03-10T00:23:41.840491+0000","192.168.123.103:6800/501559367":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/1433105058":"2026-03-10T00:23:41.840491+0000","192.168.123.103:0/1181699057":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/3392588079":"2026-03-10T00:23:41.840491+0000","192.168.123.103:6801/1146781988":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/3373468643":"2026-03-10T00:23:31.080159+0000","192.168.123.103:6800/1146781988":"2026-03-10T00:23:31.080159+0000","192.168.123.103:0/2171914270":"2026-03-10T00:23:31.080159+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T00:24:21.026 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph tell osd.0 flush_pg_stats 2026-03-09T00:24:21.027 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph tell osd.1 flush_pg_stats 2026-03-09T00:24:21.205 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:21.212 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:21.530 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:21 vm03 ceph-mon[50869]: pgmap v22: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:21.530 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:21 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/4178477172' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T00:24:21.568 INFO:teuthology.orchestra.run.vm03.stdout:34359738374 2026-03-09T00:24:21.568 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd last-stat-seq osd.0 2026-03-09T00:24:21.573 INFO:teuthology.orchestra.run.vm03.stdout:51539607554 2026-03-09T00:24:21.573 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd last-stat-seq osd.1 2026-03-09T00:24:21.768 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:21.860 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:22.041 INFO:teuthology.orchestra.run.vm03.stdout:34359738372 2026-03-09T00:24:22.121 INFO:teuthology.orchestra.run.vm03.stdout:51539607553 2026-03-09T00:24:22.223 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738374 got 34359738372 for osd.0 2026-03-09T00:24:22.274 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607554 got 51539607553 for osd.1 2026-03-09T00:24:22.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:22 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2641106261' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T00:24:22.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:22 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/4190798950' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T00:24:23.225 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd last-stat-seq osd.0 2026-03-09T00:24:23.275 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph osd last-stat-seq osd.1 2026-03-09T00:24:23.431 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:23.501 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:23 vm03 ceph-mon[50869]: pgmap v23: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:23.535 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:23.720 INFO:teuthology.orchestra.run.vm03.stdout:34359738374 2026-03-09T00:24:23.779 INFO:teuthology.orchestra.run.vm03.stdout:51539607554 2026-03-09T00:24:23.879 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738374 got 34359738374 for osd.0 2026-03-09T00:24:23.879 DEBUG:teuthology.parallel:result is None 2026-03-09T00:24:23.931 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607554 got 51539607554 for osd.1 2026-03-09T00:24:23.931 DEBUG:teuthology.parallel:result is None 2026-03-09T00:24:23.931 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-09T00:24:23.931 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph pg dump --format=json 2026-03-09T00:24:24.097 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:24.328 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:24.328 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-09T00:24:24.418 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:24 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/123211213' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T00:24:24.418 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:24 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/3436731152' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T00:24:24.485 INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":24,"stamp":"2026-03-09T00:24:23.855694+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":0,"num_osds":2,"num_per_pool_osds":2,"num_per_pool_omap_osds":0,"kb":41934848,"kb_used":53900,"kb_used_data":216,"kb_used_omap":3,"kb_used_meta":53628,"kb_avail":41880948,"statfs":{"total":42941284352,"available":42886090752,"internally_reserved":0,"allocated":221184,"data_stored":53225,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":3180,"internal_metadata":54915988},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"0.000000"},"pg_stats":[],"pool_stats":[],"osd_stats":[{"osd":1,"up_from":12,"seq":51539607555,"num_pgs":0,"num_osds":1,"num_p
er_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":27832,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738374,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26948,"kb_used_data":104,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940476,"statfs":{"total":21470642176,"available":21443047424,"internally_reserved":0,"allocated":106496,"data_stored":25393,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[]}} 2026-03-09T00:24:24.485 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph pg dump --format=json 2026-03-09T00:24:24.661 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:24.902 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:24.902 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-09T00:24:25.055 
INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":24,"stamp":"2026-03-09T00:24:23.855694+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":0,"num_osds":2,"num_per_pool_osds":2,"num_per_pool_omap_osds":0,"kb":41934848,"kb_used":53900,"kb_used_data":216,"kb_used_omap":3,"kb_used_meta":53628,"kb_avail":41880948,"statfs":{"total":42941284352,"available":42886090752,"internally_reserved":0,"allocated":221184,"data_stored":53225,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":3180,"internal_metadata":54915988},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"0.000000"},"pg_stats":[],"pool_stats":[],"osd_stats":[{"osd":1,"up_from":12,"seq":51539607555,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb
_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":27832,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738374,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26948,"kb_used_data":104,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940476,"statfs":{"total":21470642176,"available":21443047424,"internally_reserved":0,"allocated":106496,"data_stored":25393,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[]}} 2026-03-09T00:24:25.055 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-09T00:24:25.055 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-09T00:24:25.055 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-09T00:24:25.055 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph health --format=json 2026-03-09T00:24:25.221 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:25.475 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:25.475 INFO:teuthology.orchestra.run.vm03.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-09T00:24:25.670 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:25 vm03 ceph-mon[50869]: pgmap v24: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:25.670 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:25 vm03 ceph-mon[50869]: from='client.14230 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:25.696 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-09T00:24:25.696 INFO:tasks.cephadm:Setup complete, yielding 2026-03-09T00:24:25.696 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-09T00:24:25.699 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-09T00:24:25.699 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph fs volume create cephfs' 2026-03-09T00:24:25.873 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:26.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:26 vm03 ceph-mon[50869]: from='client.14232 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:26.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:26 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/2733658143' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T00:24:26.751 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:26 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"}]: dispatch 2026-03-09T00:24:27.612 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-09T00:24:27.615 INFO:tasks.cephadm:Waiting for ceph service mds.cephfs to start (timeout 300)... 2026-03-09T00:24:27.615 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: pgmap v25: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: from='client.14236 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "cephfs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd='[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"}]': finished 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: osdmap e14: 2 total, 2 up, 2 in 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"}]: dispatch 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd='[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"}]': finished 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-mon[50869]: osdmap e15: 2 total, 2 up, 2 in 2026-03-09T00:24:27.738 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:27 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a[50846]: 2026-03-09T00:24:27.436+0000 7f06dcec4640 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-09T00:24:27.984 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config 
/var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:28.509 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:28.509 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:27.457088Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "running": 0, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:13.088152Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:13.088085Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:13.088197Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]: dispatch 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd='[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]': finished 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: osdmap e16: 2 total, 2 up, 2 in 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: fsmap cephfs:0 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: Saving service mds.cephfs spec with placement count:2 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 
ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.nrbsbw", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd='[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.nrbsbw", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:28.635 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:28 vm03 ceph-mon[50869]: Deploying daemon mds.cephfs.vm03.nrbsbw on vm03 2026-03-09T00:24:28.664 INFO:tasks.cephadm:mds.cephfs has 0/2 2026-03-09T00:24:29.665 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: pgmap v29: 64 pgs: 39 active+undersized, 25 unknown; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: osdmap e17: 2 total, 2 up, 2 in 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='client.14238 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.enrdyu", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd='[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.enrdyu", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' 
entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:29.693 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:29 vm03 ceph-mon[50869]: Deploying daemon mds.cephfs.vm03.enrdyu on vm03 2026-03-09T00:24:29.854 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:30.111 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:30.111 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "running": 0, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:13.088152Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:13.088085Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:13.088197Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-09T00:24:30.289 INFO:tasks.cephadm:mds.cephfs has 0/2 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: mds.? 
[v2:192.168.123.103:6818/1974279541,v1:192.168.123.103:6819/1974279541] up:boot 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: daemon mds.cephfs.vm03.nrbsbw assigned to filesystem cephfs as rank 0 (now has 1 ranks) 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: Cluster is now healthy 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: fsmap cephfs:0 1 up:standby 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mds metadata", "who": "cephfs.vm03.nrbsbw"}]: dispatch 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: fsmap cephfs:1 {0=cephfs.vm03.nrbsbw=up:creating} 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: daemon mds.cephfs.vm03.nrbsbw is now active in filesystem cephfs as rank 0 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:30 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' 
entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:31.290 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:31.461 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: pgmap v31: 64 pgs: 39 active+undersized, 25 unknown; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='client.14244 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: mds.? [v2:192.168.123.103:6820/2773492818,v1:192.168.123.103:6821/2773492818] up:boot 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: mds.? [v2:192.168.123.103:6818/1974279541,v1:192.168.123.103:6819/1974279541] up:active 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: fsmap cephfs:1 {0=cephfs.vm03.nrbsbw=up:active} 1 up:standby 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "mds metadata", "who": "cephfs.vm03.enrdyu"}]: dispatch 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: fsmap cephfs:1 {0=cephfs.vm03.nrbsbw=up:active} 1 up:standby 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:31.577 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:31 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:31.700 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:31.700 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": 
"2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-09T00:24:32.035 INFO:tasks.cephadm:mds.cephfs has 2/2 2026-03-09T00:24:32.035 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T00:24:32.037 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-09T00:24:32.037 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph fs subvolumegroup create cephfs g1' 2026-03-09T00:24:32.214 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:32.724 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph fs subvolume create cephfs sub1 --group-name=g1 --mode=0777' 2026-03-09T00:24:32.884 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:32 vm03 ceph-mon[50869]: from='client.14246 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:32.884 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:32 vm03 ceph-mon[50869]: pgmap v32: 64 pgs: 3 active+undersized+degraded, 61 active+undersized; 592 B data, 53 MiB used, 40 GiB / 40 GiB avail; 7/21 objects degraded (33.333%) 2026-03-09T00:24:32.884 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:32 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:32.884 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:32 vm03 ceph-mon[50869]: Health check failed: Degraded data redundancy: 7/21 objects degraded (33.333%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T00:24:32.884 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:32 vm03 ceph-mon[50869]: from='client.14248 -' entity='client.admin' cmd=[{"prefix": "fs subvolumegroup create", "vol_name": "cephfs", "group_name": "g1", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:24:32.884 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:24:32 vm03 ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mgr-x[51101]: 2026-03-09T00:24:32.565+0000 7fb6b712f640 -1 client.14250 error registering admin socket command: (17) File exists 2026-03-09T00:24:32.904 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:33.315 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph fs authorize cephfs client.smbdata / rw' 2026-03-09T00:24:33.492 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:33.762 INFO:teuthology.orchestra.run.vm03.stdout:[client.smbdata] 2026-03-09T00:24:33.762 INFO:teuthology.orchestra.run.vm03.stdout: key = AQBBE65ppbc8LRAAsuNoVsEkgwiIpAcxnsoycw== 2026-03-09T00:24:33.762 INFO:teuthology.orchestra.run.vm03.stdout: caps mds = "allow rw fsname=cephfs" 2026-03-09T00:24:33.762 INFO:teuthology.orchestra.run.vm03.stdout: caps mon = "allow r fsname=cephfs" 2026-03-09T00:24:33.762 INFO:teuthology.orchestra.run.vm03.stdout: caps osd = "allow rw tag cephfs data=cephfs" 2026-03-09T00:24:33.933 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph osd pool create .smb --yes-i-really-mean-it' 2026-03-09T00:24:34.100 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:34.126 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:33 vm03 ceph-mon[50869]: from='client.14252 -' entity='client.admin' cmd=[{"prefix": "fs subvolume create", "vol_name": "cephfs", "sub_name": "sub1", "group_name": "g1", "mode": "0777", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:24:34.127 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:33 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/1385345586' entity='client.admin' cmd=[{"prefix": "fs authorize", "filesystem": "cephfs", "entity": "client.smbdata", "caps": ["/", "rw"]}]: dispatch 2026-03-09T00:24:34.127 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:33 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/1385345586' entity='client.admin' cmd='[{"prefix": "fs authorize", "filesystem": "cephfs", "entity": "client.smbdata", "caps": ["/", "rw"]}]': finished 2026-03-09T00:24:34.893 INFO:teuthology.orchestra.run.vm03.stderr:pool '.smb' created 2026-03-09T00:24:35.052 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph osd pool application enable .smb smb' 2026-03-09T00:24:35.226 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:35.250 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:34 vm03 ceph-mon[50869]: pgmap v33: 64 pgs: 10 active+undersized+degraded, 54 active+undersized; 8.9 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 10 KiB/s wr, 14 op/s; 22/66 objects degraded (33.333%) 2026-03-09T00:24:35.250 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:34 vm03 ceph-mon[50869]: mds.? 
[v2:192.168.123.103:6820/2773492818,v1:192.168.123.103:6821/2773492818] up:standby 2026-03-09T00:24:35.250 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:34 vm03 ceph-mon[50869]: fsmap cephfs:1 {0=cephfs.vm03.nrbsbw=up:active} 1 up:standby 2026-03-09T00:24:35.250 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:34 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3755218628' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": ".smb", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T00:24:35.250 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:34 vm03 ceph-mon[50869]: mgrmap e14: x(active, since 52s) 2026-03-09T00:24:35.898 INFO:teuthology.orchestra.run.vm03.stderr:enabled application 'smb' on pool '.smb' 2026-03-09T00:24:36.051 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'rados --pool=.smb --namespace=admem1 put conf.toml /dev/stdin' 2026-03-09T00:24:36.219 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:36.243 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:35 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3755218628' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": ".smb", "yes_i_really_mean_it": true}]': finished 2026-03-09T00:24:36.243 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:35 vm03 ceph-mon[50869]: osdmap e18: 2 total, 2 up, 2 in 2026-03-09T00:24:36.243 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:35 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3479637481' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": ".smb", "app": "smb"}]: dispatch 2026-03-09T00:24:36.470 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- bash -c 'ceph config-key set smb/config/admem1/join1.json -i -' 2026-03-09T00:24:36.637 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:36.925 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:36 vm03 ceph-mon[50869]: pgmap v35: 96 pgs: 32 unknown, 10 active+undersized+degraded, 54 active+undersized; 8.9 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 5.2 KiB/s wr, 7 op/s; 22/66 objects degraded (33.333%) 2026-03-09T00:24:36.925 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:36 vm03 ceph-mon[50869]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED) 2026-03-09T00:24:36.925 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:36 vm03 ceph-mon[50869]: from='client.? 192.168.123.103:0/3479637481' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": ".smb", "app": "smb"}]': finished 2026-03-09T00:24:36.925 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:36 vm03 ceph-mon[50869]: osdmap e19: 2 total, 2 up, 2 in 2026-03-09T00:24:36.926 INFO:teuthology.orchestra.run.vm03.stderr:set smb/config/admem1/join1.json 2026-03-09T00:24:37.099 INFO:teuthology.run_tasks:Running task cephadm.apply... 
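The cephadm.apply task below feeds the smb service spec to the orchestrator over stdin (`ceph orch apply -i -`), and cephadm.wait_for_service then polls `ceph orch ls -f json` until smb.admem1 reports as many running daemons as its size. A rough hand-run equivalent is sketched here, with the spec fields copied from the spec applied below and FSID/IMAGE as placeholder variables holding this run's values:

# Apply the smb service spec over stdin, as the cephadm.apply step does.
FSID=06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86
IMAGE=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell --fsid "$FSID" -- \
        ceph orch apply -i - <<'EOF'
service_type: smb
service_id: admem1
placement:
  count: 1
cluster_id: admem1
config_uri: rados://.smb/admem1/conf.toml
custom_dns:
  - 192.168.123.106
features:
  - domain
include_ceph_users:
  - client.smbdata
join_sources:
  - rados:mon-config-key:smb/config/admem1/join1.json
EOF
# cephadm.wait_for_service then repeats this until the service shows running == size:
sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell --fsid "$FSID" -- ceph orch ls -f json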
2026-03-09T00:24:37.103 INFO:tasks.cephadm:Applying spec(s): cluster_id: admem1 config_uri: rados://.smb/admem1/conf.toml custom_dns: - 192.168.123.106 features: - domain include_ceph_users: - client.smbdata join_sources: - rados:mon-config-key:smb/config/admem1/join1.json placement: count: 1 service_id: admem1 service_type: smb 2026-03-09T00:24:37.103 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch apply -i - 2026-03-09T00:24:37.287 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:37.538 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled smb.admem1 update... 2026-03-09T00:24:37.708 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-09T00:24:37.711 INFO:tasks.cephadm:Waiting for ceph service smb.admem1 to start (timeout 300)... 2026-03-09T00:24:37.711 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:37.955 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: osdmap e20: 2 total, 2 up, 2 in 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='client.? 
192.168.123.103:0/2538805490' entity='client.admin' 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='client.14265 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: Saving service smb.admem1 spec with placement count:1 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:37.987 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.smb.config.admem1.vm03.mqtnto", "caps": ["mon", "allow r, allow command \"config-key get\" with \"key\" prefix \"smb/config/admem1/\"", "osd", "allow r pool=.smb"]}]: dispatch 2026-03-09T00:24:37.988 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd='[{"prefix": "auth get-or-create", "entity": "client.smb.config.admem1.vm03.mqtnto", "caps": ["mon", "allow r, allow command \"config-key get\" with \"key\" prefix \"smb/config/admem1/\"", "osd", "allow r pool=.smb"]}]': finished 2026-03-09T00:24:37.988 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.smbdata"}]: dispatch 2026-03-09T00:24:37.988 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:37.988 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:37 vm03 ceph-mon[50869]: Deploying daemon smb.admem1.vm03.mqtnto on vm03 2026-03-09T00:24:38.271 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:38.272 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, 
"size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:38.482 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:39.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:38 vm03 ceph-mon[50869]: pgmap v38: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 29 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 7.8 KiB/s wr, 6 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:39.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:38 vm03 ceph-mon[50869]: from='client.14267 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:39.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:38 vm03 ceph-mon[50869]: Health check update: Degraded data redundancy: 24/72 objects degraded (33.333%), 15 pgs degraded (PG_DEGRADED) 2026-03-09T00:24:39.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:38 vm03 ceph-mon[50869]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled) 2026-03-09T00:24:39.483 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:39.670 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:39.985 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:39.985 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] 
\"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:40.153 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:41.154 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:41.187 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:40 vm03 ceph-mon[50869]: pgmap v39: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 29 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 4.3 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:41.187 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:40 vm03 ceph-mon[50869]: from='client.14269 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:41.390 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:41.700 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:41.700 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 
"container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:41.915 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:42.915 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:43.143 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:43.188 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:42 vm03 ceph-mon[50869]: from='client.14271 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:43.188 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:42 vm03 ceph-mon[50869]: pgmap v40: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 29 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 3.7 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:43.426 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:43.426 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": 
"rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:43.579 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:44.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:43 vm03 ceph-mon[50869]: from='client.14273 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:44.579 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:44.801 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:45.082 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:45.082 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:45.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:44 vm03 ceph-mon[50869]: pgmap v41: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 4.5 KiB/s wr, 2 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:45.558 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:46.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:45 vm03 ceph-mon[50869]: from='client.14275 -' 
entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:46.559 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:46.761 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:47.054 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:47.054 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:47.195 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:46 vm03 ceph-mon[50869]: pgmap v42: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 3.6 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:47.235 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:48.236 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:48.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:47 vm03 ceph-mon[50869]: from='client.14277 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:48.460 
INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:48.752 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:48.752 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:48.934 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:49.165 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:48 vm03 ceph-mon[50869]: pgmap v43: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 3.3 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:49.935 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:50.738 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:50.911 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:50 vm03.local ceph-mon[50869]: from='client.14279 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:51.172 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:51.172 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": 
"2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:37.536489Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:51.371 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:51.796 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:51 vm03.local ceph-mon[50869]: pgmap v44: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 853 B/s wr, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:51.796 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:51 vm03.local ceph-mon[50869]: from='client.? 
10.88.0.3:0/3703404264' entity='client.smb.config.admem1.vm03.mqtnto' cmd=[{"prefix": "config-key get", "key": "smb/config/admem1/join1.json"}]: dispatch 2026-03-09T00:24:52.372 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:52.747 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:52.759 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:52 vm03.local ceph-mon[50869]: from='client.14283 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:52.759 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:52 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:52.759 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:52 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:52.759 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:52 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:52.759 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:52 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:52.759 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:52 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:53.057 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:53.057 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:30.863233Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:30.863106Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:30.863041Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:30.863151Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:51.903142Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": 
["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "running": 0, "size": 1}}] 2026-03-09T00:24:53.272 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: pgmap v45: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 853 B/s wr, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: config is a no-op 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:24:54.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:53 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:54.273 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph orch ls -f json 2026-03-09T00:24:54.480 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:24:54.766 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T00:24:54.766 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T00:24:29.561827Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T00:24:27.448388Z", "last_refresh": "2026-03-09T00:24:53.084982Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T00:23:50.372830Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": 
["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T00:23:50.360245Z", "last_refresh": "2026-03-09T00:24:53.084862Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T00:23:48.298709Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T00:23:48.287271Z", "last_refresh": "2026-03-09T00:24:53.084808Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T00:24:53.084900Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T00:24:51.903142Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.106"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T00:24:37.533666Z", "last_refresh": "2026-03-09T00:24:53.085053Z", "running": 1, "size": 1}}] 2026-03-09T00:24:54.896 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:54 vm03.local ceph-mon[50869]: from='client.14295 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:54.928 INFO:tasks.cephadm:smb.admem1 has 1/1 2026-03-09T00:24:54.928 INFO:teuthology.run_tasks:Running task cephadm.exec... 
2026-03-09T00:24:54.932 INFO:tasks.cephadm:Running commands on role host.b host ubuntu@vm06.local 2026-03-09T00:24:54.932 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sleep 30' 2026-03-09T00:24:54.957 INFO:teuthology.orchestra.run.vm06.stderr:+ sleep 30 2026-03-09T00:24:56.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:55 vm03.local ceph-mon[50869]: pgmap v46: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 426 B/s rd, 1.1 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:56.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:55 vm03.local ceph-mon[50869]: from='client.14301 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T00:24:57.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:56 vm03.local ceph-mon[50869]: pgmap v47: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 426 B/s rd, 255 B/s wr, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:24:58.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:57 vm03.local ceph-mon[50869]: from='mgr.14150 192.168.123.103:0/669198011' entity='mgr.x' 2026-03-09T00:24:59.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:24:58 vm03.local ceph-mon[50869]: pgmap v48: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 852 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:01.252 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:00 vm03.local ceph-mon[50869]: pgmap v49: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 852 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:03.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:02 vm03.local ceph-mon[50869]: pgmap v50: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 852 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:05.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:04 vm03.local ceph-mon[50869]: pgmap v51: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 1.2 KiB/s rd, 255 B/s wr, 2 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:07.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:06 vm03.local ceph-mon[50869]: pgmap v52: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 852 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:09.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:08 vm03.local ceph-mon[50869]: pgmap v53: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 938 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:11.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:10 vm03.local ceph-mon[50869]: pgmap v54: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 511 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:13.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:12 vm03.local ceph-mon[50869]: pgmap v55: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 511 B/s rd, 0 op/s; 24/72 
objects degraded (33.333%) 2026-03-09T00:25:15.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:14 vm03.local ceph-mon[50869]: pgmap v56: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 597 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:17.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:16 vm03.local ceph-mon[50869]: pgmap v57: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 170 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:19.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:18 vm03.local ceph-mon[50869]: pgmap v58: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 255 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:21.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:20 vm03.local ceph-mon[50869]: pgmap v59: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 170 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:23.251 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:22 vm03.local ceph-mon[50869]: pgmap v60: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 170 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:24.961 DEBUG:teuthology.orchestra.run.vm06:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U DOMAIN1\\ckent%1115Rose. //192.168.123.103/share1 -c ls' 2026-03-09T00:25:25.027 INFO:teuthology.orchestra.run.vm06.stderr:+ sudo podman run --rm --net=host --dns=192.168.123.106 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //192.168.123.103/share1 -c ls 2026-03-09T00:25:25.212 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:24 vm03.local ceph-mon[50869]: pgmap v61: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 255 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T00:25:25.252 INFO:teuthology.orchestra.run.vm06.stdout: . D 0 Mon Mar 9 00:24:32 2026 2026-03-09T00:25:25.252 INFO:teuthology.orchestra.run.vm06.stdout: .. D 0 Mon Mar 9 00:24:32 2026 2026-03-09T00:25:25.252 INFO:teuthology.orchestra.run.vm06.stdout: volumes D 0 Mon Mar 9 00:24:33 2026 2026-03-09T00:25:25.252 INFO:teuthology.orchestra.run.vm06.stdout: 2026-03-09T00:25:25.252 INFO:teuthology.orchestra.run.vm06.stdout: 13258752 blocks of size 1024. 
13258752 blocks available 2026-03-09T00:25:25.383 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-09T00:25:25.387 INFO:tasks.cephadm:Teardown begin 2026-03-09T00:25:25.387 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:25:25.417 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:25:25.452 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-09T00:25:25.452 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 -- ceph mgr module disable cephadm 2026-03-09T00:25:25.660 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/mon.a/config 2026-03-09T00:25:25.685 INFO:teuthology.orchestra.run.vm03.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-03-09T00:25:25.711 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-09T00:25:25.712 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-09T00:25:25.712 DEBUG:teuthology.orchestra.run.vm03:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T00:25:25.729 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T00:25:25.746 INFO:tasks.cephadm:Stopping all daemons... 2026-03-09T00:25:25.746 INFO:tasks.cephadm.mon.a:Stopping mon.a... 2026-03-09T00:25:25.746 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a 2026-03-09T00:25:26.150 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:25 vm03.local systemd[1]: Stopping Ceph mon.a for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 
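The smbclient listing above (".", "..", "volumes") is the actual pass/fail check of this job: it shows the domain-joined smb.admem1 gateway serving the CephFS-backed share1 to an AD user. The check can be reproduced by hand with roughly the command the cephadm.exec task ran, sketched here via subprocess (the DC address 192.168.123.106, the share host 192.168.123.103 and the ckent credentials are the test fixtures from this run):

```python
import subprocess

# Run smbclient from the samba-client container, pointing DNS at the AD DC
# so the DOMAIN1 realm resolves, and list the share exported by smb.admem1.
cmd = [
    "sudo", "podman", "run", "--rm", "--net=host",
    "--dns=192.168.123.106",            # AD DC deployed on host.b
    "-e", "KRB5_CONFIG=/dev/null",
    "quay.io/samba.org/samba-client:latest",
    "smbclient", "-U", r"DOMAIN1\ckent%1115Rose.",
    "//192.168.123.103/share1", "-c", "ls",
]
print(subprocess.run(cmd, capture_output=True, text=True).stdout)
```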
2026-03-09T00:25:26.150 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:25 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a[50846]: 2026-03-09T00:25:25.901+0000 7f06e26cf640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:25:26.150 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:25 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a[50846]: 2026-03-09T00:25:25.901+0000 7f06e26cf640 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-09T00:25:26.150 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 00:25:26 vm03.local podman[79215]: 2026-03-09 00:25:26.029338225 +0000 UTC m=+0.144698107 container died d732babca9c1f382d37cef46bfec5340c187b09e69d1cd22e789c00d68707b6f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-mon-a, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS) 2026-03-09T00:25:26.238 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mon.a.service' 2026-03-09T00:25:26.282 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:25:26.282 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-09T00:25:26.282 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-09T00:25:26.282 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x 2026-03-09T00:25:26.493 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 00:25:26 vm03.local systemd[1]: Stopping Ceph mgr.x for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 2026-03-09T00:25:26.715 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@mgr.x.service' 2026-03-09T00:25:26.750 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:25:26.750 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-09T00:25:26.750 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-09T00:25:26.750 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@osd.0 2026-03-09T00:25:27.001 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:26 vm03.local systemd[1]: Stopping Ceph osd.0 for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 
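Teardown stops each cephadm-managed daemon through its per-fsid systemd unit, named ceph-&lt;fsid&gt;@&lt;daemon&gt; as the mon.a, mgr.x and osd stop commands around this point show. A sketch of that mapping, assuming the same fsid (the loop and helper are illustrative, not the cephadm task code):

```python
import subprocess

FSID = "06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86"

def stop_daemon(daemon: str) -> None:
    """Stop one cephadm-managed daemon via its per-fsid systemd unit."""
    subprocess.run(["sudo", "systemctl", "stop", f"ceph-{FSID}@{daemon}"],
                   check=True)

for name in ("mon.a", "mgr.x", "osd.0", "osd.1"):
    stop_daemon(name)
```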
2026-03-09T00:25:27.001 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:26 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0[63478]: 2026-03-09T00:25:26.922+0000 7f4f37072640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:25:27.002 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:26 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0[63478]: 2026-03-09T00:25:26.922+0000 7f4f37072640 -1 osd.0 20 *** Got signal Terminated *** 2026-03-09T00:25:27.002 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:26 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0[63478]: 2026-03-09T00:25:26.922+0000 7f4f37072640 -1 osd.0 20 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:25:32.216 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:31 vm03.local podman[79441]: 2026-03-09 00:25:31.95480622 +0000 UTC m=+5.066413373 container died 97cc97ee15d1a7fd320d4c1361995e732ad19b191ccf1d22c9ecf6e550af1763 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223) 2026-03-09T00:25:32.217 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79441]: 2026-03-09 00:25:32.076489399 +0000 UTC m=+5.188096552 container remove 97cc97ee15d1a7fd320d4c1361995e732ad19b191ccf1d22c9ecf6e550af1763 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:25:32.217 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local bash[79441]: ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0 2026-03-09T00:25:32.502 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79523]: 2026-03-09 00:25:32.216686209 +0000 UTC m=+0.017404178 container create 2e98c98f6f03eb33f665c98f92fdc8caeed911ea475c0af0a5ffdec70b4260f8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0-deactivate, 
org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:25:32.502 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79523]: 2026-03-09 00:25:32.268321911 +0000 UTC m=+0.069039880 container init 2e98c98f6f03eb33f665c98f92fdc8caeed911ea475c0af0a5ffdec70b4260f8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-09T00:25:32.502 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79523]: 2026-03-09 00:25:32.273959272 +0000 UTC m=+0.074677241 container start 2e98c98f6f03eb33f665c98f92fdc8caeed911ea475c0af0a5ffdec70b4260f8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:25:32.502 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79523]: 2026-03-09 00:25:32.279067454 +0000 UTC m=+0.079785434 container attach 2e98c98f6f03eb33f665c98f92fdc8caeed911ea475c0af0a5ffdec70b4260f8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0-deactivate, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:25:32.502 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79523]: 2026-03-09 00:25:32.209766035 +0000 UTC m=+0.010484004 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:25:32.502 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 00:25:32 vm03.local podman[79523]: 2026-03-09 00:25:32.424224591 +0000 UTC m=+0.224942560 container died 2e98c98f6f03eb33f665c98f92fdc8caeed911ea475c0af0a5ffdec70b4260f8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-0-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0) 2026-03-09T00:25:32.557 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@osd.0.service' 2026-03-09T00:25:32.590 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:25:32.590 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-09T00:25:32.590 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-09T00:25:32.590 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@osd.1 2026-03-09T00:25:33.002 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:32 vm03.local systemd[1]: Stopping Ceph osd.1 for 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86... 
2026-03-09T00:25:33.002 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:32 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1[68677]: 2026-03-09T00:25:32.737+0000 7f565fa0b640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:25:33.002 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:32 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1[68677]: 2026-03-09T00:25:32.737+0000 7f565fa0b640 -1 osd.1 20 *** Got signal Terminated *** 2026-03-09T00:25:33.002 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:32 vm03.local ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1[68677]: 2026-03-09T00:25:32.737+0000 7f565fa0b640 -1 osd.1 20 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:25:38.029 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:37 vm03.local podman[79645]: 2026-03-09 00:25:37.761553796 +0000 UTC m=+5.039885789 container died 61d809a8d3ac897d03ad6074605c3326e9f7d06bc71f468520a7806987b8879b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default) 2026-03-09T00:25:38.029 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:37 vm03.local podman[79645]: 2026-03-09 00:25:37.889586329 +0000 UTC m=+5.167918313 container remove 61d809a8d3ac897d03ad6074605c3326e9f7d06bc71f468520a7806987b8879b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS) 2026-03-09T00:25:38.029 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:37 vm03.local bash[79645]: ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1 2026-03-09T00:25:38.340 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:38 vm03.local podman[79725]: 2026-03-09 00:25:38.028984494 +0000 UTC m=+0.017309320 container create cddfa17cd22f6c4ad47ec5b6e9561fe8c5a9e6fb7acf452bbd224951155627a0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1-deactivate, 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:25:38.340 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:38 vm03.local podman[79725]: 2026-03-09 00:25:38.082844689 +0000 UTC m=+0.071169525 container init cddfa17cd22f6c4ad47ec5b6e9561fe8c5a9e6fb7acf452bbd224951155627a0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1-deactivate, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:25:38.340 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:38 vm03.local podman[79725]: 2026-03-09 00:25:38.088760263 +0000 UTC m=+0.077085089 container start cddfa17cd22f6c4ad47ec5b6e9561fe8c5a9e6fb7acf452bbd224951155627a0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, ceph=True) 2026-03-09T00:25:38.340 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:38 vm03.local podman[79725]: 2026-03-09 00:25:38.089926936 +0000 UTC m=+0.078251752 container attach cddfa17cd22f6c4ad47ec5b6e9561fe8c5a9e6fb7acf452bbd224951155627a0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1-deactivate, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:25:38.340 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:38 vm03.local podman[79725]: 2026-03-09 00:25:38.020742487 +0000 UTC m=+0.009067323 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:25:38.340 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 00:25:38 vm03.local podman[79725]: 2026-03-09 00:25:38.225811946 +0000 UTC m=+0.214136773 container died cddfa17cd22f6c4ad47ec5b6e9561fe8c5a9e6fb7acf452bbd224951155627a0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86-osd-1-deactivate, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2) 2026-03-09T00:25:38.363 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86@osd.1.service' 2026-03-09T00:25:38.404 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:25:38.404 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-09T00:25:38.404 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 --force --keep-logs 2026-03-09T00:25:38.589 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:25:49.378 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 --force --keep-logs 2026-03-09T00:25:49.512 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr:Traceback (most recent call last): 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: return _run_code(code, main_globals, None, 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: exec(code, run_globals) 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 4338, in command_rm_cluster 2026-03-09T00:25:49.592 
INFO:teuthology.orchestra.run.vm06.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 4402, in _rm_cluster 2026-03-09T00:25:49.592 INFO:teuthology.orchestra.run.vm06.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 4328, in get_ceph_cluster_count 2026-03-09T00:25:49.593 INFO:teuthology.orchestra.run.vm06.stderr:FileNotFoundError: [Errno 2] No such file or directory: '/var/lib/ceph' 2026-03-09T00:25:49.609 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:25:49.609 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:25:49.641 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:25:49.673 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-09T00:25:49.673 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/crash to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310/remote/vm03/crash 2026-03-09T00:25:49.673 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/crash -- . 2026-03-09T00:25:49.711 INFO:teuthology.orchestra.run.vm03.stderr:tar: /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/crash: Cannot open: No such file or directory 2026-03-09T00:25:49.711 INFO:teuthology.orchestra.run.vm03.stderr:tar: Error is not recoverable: exiting now 2026-03-09T00:25:49.712 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/crash to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310/remote/vm06/crash 2026-03-09T00:25:49.713 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/crash -- . 2026-03-09T00:25:49.747 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/crash: Cannot open: No such file or directory 2026-03-09T00:25:49.748 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now 2026-03-09T00:25:49.749 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-09T00:25:49.749 DEBUG:teuthology.orchestra.run.vm03:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | head -n 1 2026-03-09T00:25:49.781 INFO:tasks.cephadm:Compressing logs... 
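The "Checking cluster log for badness" egrep above encodes this job's overrides: only CEPHADM_ health messages count as failures (log-only-match), and the two MDS warnings from the log-ignorelist are filtered out. An equivalent filter in Python, shown only to make the pipeline's precedence explicit (path and helper name are illustrative):

```python
import re
from typing import Optional

# Severity gate, allow-list and ignore-list mirror the egrep pipeline above
# and the job's log-only-match / log-ignorelist overrides.
SEVERITY = re.compile(r"\[ERR\]|\[WRN\]|\[SEC\]")
ONLY_MATCH = [re.compile(r"CEPHADM_")]
IGNORE = [re.compile(r"\(MDS_ALL_DOWN\)"), re.compile(r"\(MDS_UP_LESS_THAN_MAX\)")]

def first_bad_line(path: str) -> Optional[str]:
    """Return the first cluster-log line that should fail the job, if any."""
    with open(path, errors="replace") as f:
        for line in f:
            if not SEVERITY.search(line):
                continue
            if not any(p.search(line) for p in ONLY_MATCH):
                continue
            if any(p.search(line) for p in IGNORE):
                continue
            return line.rstrip()
    return None

# e.g. first_bad_line("/var/log/ceph/<fsid>/ceph.log")
```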
2026-03-09T00:25:49.782 DEBUG:teuthology.orchestra.run.vm03:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T00:25:49.824 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T00:25:49.854 INFO:teuthology.orchestra.run.vm06.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T00:25:49.854 INFO:teuthology.orchestra.run.vm06.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T00:25:49.854 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T00:25:49.854 INFO:teuthology.orchestra.run.vm03.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T00:25:49.855 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mon.a.log 2026-03-09T00:25:49.855 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.log 2026-03-09T00:25:49.856 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/cephadm.log: 70.7% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T00:25:49.857 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mon.a.log: 91.2% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T00:25:49.857 INFO:teuthology.orchestra.run.vm06.stderr: 2026-03-09T00:25:49.857 INFO:teuthology.orchestra.run.vm06.stderr:real 0m0.014s 2026-03-09T00:25:49.857 INFO:teuthology.orchestra.run.vm06.stderr:user 0m0.004s 2026-03-09T00:25:49.857 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.018s 2026-03-09T00:25:49.857 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.audit.log 2026-03-09T00:25:49.858 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.log: 83.5% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.log.gz 2026-03-09T00:25:49.858 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mgr.x.log 2026-03-09T00:25:49.859 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.audit.log: 88.9% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.audit.log.gz 2026-03-09T00:25:49.859 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.cephadm.log 2026-03-09T00:25:49.870 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mgr.x.log: gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-volume.log 2026-03-09T00:25:49.871 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.cephadm.log: 76.2% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph.cephadm.log.gz 2026-03-09T00:25:49.871 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-osd.0.log 2026-03-09T00:25:49.879 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-volume.log: 95.1% -- 
replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-volume.log.gz 2026-03-09T00:25:49.879 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-osd.1.log 2026-03-09T00:25:49.893 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mds.cephfs.vm03.nrbsbw.log 2026-03-09T00:25:49.900 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-osd.1.log: 89.6% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mgr.x.log.gz 2026-03-09T00:25:49.900 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mds.cephfs.vm03.enrdyu.log 2026-03-09T00:25:49.901 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mds.cephfs.vm03.nrbsbw.log: 77.1% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mds.cephfs.vm03.nrbsbw.log.gz 2026-03-09T00:25:49.904 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mds.cephfs.vm03.enrdyu.log: 68.9% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mds.cephfs.vm03.enrdyu.log.gz 2026-03-09T00:25:49.931 INFO:teuthology.orchestra.run.vm03.stderr: 91.1% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-mon.a.log.gz 2026-03-09T00:25:49.960 INFO:teuthology.orchestra.run.vm03.stderr: 95.3% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-osd.0.log.gz 2026-03-09T00:25:49.997 INFO:teuthology.orchestra.run.vm03.stderr: 95.2% -- replaced with /var/log/ceph/06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86/ceph-osd.1.log.gz 2026-03-09T00:25:49.998 INFO:teuthology.orchestra.run.vm03.stderr: 2026-03-09T00:25:49.998 INFO:teuthology.orchestra.run.vm03.stderr:real 0m0.158s 2026-03-09T00:25:49.998 INFO:teuthology.orchestra.run.vm03.stderr:user 0m0.233s 2026-03-09T00:25:49.998 INFO:teuthology.orchestra.run.vm03.stderr:sys 0m0.035s 2026-03-09T00:25:49.999 INFO:tasks.cephadm:Archiving logs... 2026-03-09T00:25:49.999 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/log/ceph to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310/remote/vm03/log 2026-03-09T00:25:49.999 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T00:25:50.083 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310/remote/vm06/log 2026-03-09T00:25:50.084 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T00:25:50.110 INFO:tasks.cephadm:Removing cluster... 2026-03-09T00:25:50.110 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 --force 2026-03-09T00:25:50.262 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 2026-03-09T00:25:50.373 INFO:tasks.cephadm:Removing cephadm ... 
2026-03-09T00:25:50.110 INFO:tasks.cephadm:Removing cluster...
2026-03-09T00:25:50.110 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86 --force
2026-03-09T00:25:50.262 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 06e0ecac-1b4e-11f1-a2bb-e7a4818e8e86
2026-03-09T00:25:50.373 INFO:tasks.cephadm:Removing cephadm ...
2026-03-09T00:25:50.373 DEBUG:teuthology.orchestra.run.vm03:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-09T00:25:50.388 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-09T00:25:50.407 INFO:tasks.cephadm:Teardown complete
2026-03-09T00:25:50.408 DEBUG:teuthology.run_tasks:Unwinding manager cephadm.deploy_samba_ad_dc
2026-03-09T00:25:50.410 DEBUG:teuthology.orchestra.run.vm06:> sudo podman stop samba-ad
2026-03-09T00:25:50.694 INFO:teuthology.orchestra.run.vm06.stdout:samba-ad
2026-03-09T00:25:50.700 DEBUG:teuthology.orchestra.run.vm06:> sudo podman rm samba-ad
2026-03-09T00:25:50.767 INFO:teuthology.orchestra.run.vm06.stdout:samba-ad
2026-03-09T00:25:50.771 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -rf /var/lib/samba/container/logs /var/lib/samba/container/data
2026-03-09T00:25:50.794 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-09T00:25:50.796 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-09T00:25:50.796 DEBUG:teuthology.orchestra.run.vm03:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T00:25:50.799 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T00:25:50.816 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-09T00:25:50.821 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T00:25:50.822 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-09T00:25:50.822 INFO:teuthology.orchestra.run.vm03.stdout:^+ bond1-1201.fsn-lf-s02.pr> 2 6 177 62 -177us[ -153us] +/- 20ms
2026-03-09T00:25:50.822 INFO:teuthology.orchestra.run.vm03.stdout:^+ 185.252.140.125 2 6 177 61 -583us[ -558us] +/- 21ms
2026-03-09T00:25:50.822 INFO:teuthology.orchestra.run.vm03.stdout:^* vps-ber1.orleans.ddnss.de 2 6 177 61 -1042us[-1017us] +/- 15ms
2026-03-09T00:25:50.822 INFO:teuthology.orchestra.run.vm03.stdout:^+ mail.morbitzer.de 2 6 177 61 +2338us[+2363us] +/- 21ms
2026-03-09T00:25:50.853 INFO:teuthology.orchestra.run.vm06.stderr:bash: line 1: ntpq: command not found
2026-03-09T00:25:50.857 INFO:teuthology.orchestra.run.vm06.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T00:25:50.857 INFO:teuthology.orchestra.run.vm06.stdout:===============================================================================
2026-03-09T00:25:50.857 INFO:teuthology.orchestra.run.vm06.stdout:^+ mail.morbitzer.de 2 6 177 60 +2318us[+2318us] +/- 21ms
2026-03-09T00:25:50.857 INFO:teuthology.orchestra.run.vm06.stdout:^+ bond1-1201.fsn-lf-s02.pr> 2 6 177 61 -177us[ -174us] +/- 20ms
2026-03-09T00:25:50.857 INFO:teuthology.orchestra.run.vm06.stdout:^+ 185.252.140.125 2 6 177 61 -604us[ -601us] +/- 21ms
2026-03-09T00:25:50.858 INFO:teuthology.orchestra.run.vm06.stdout:^* vps-ber1.orleans.ddnss.de 2 6 177 60 -1115us[-1112us] +/- 15ms
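The clock task's final skew check above tries ntpq first and falls back to chronyc (ntpq is not installed on these CentOS 9.stream VMs), with a trailing '|| true' so the teardown can never fail on a missing tool. A rough local Python equivalent of that fallback; the helper name is made up:

    import subprocess

    def report_clock_sources():
        # Mirror "ntpq -p || chronyc sources || true": try ntpq, fall back to
        # chronyc, and never raise.
        for cmd in (["ntpq", "-p"], ["chronyc", "sources"]):
            try:
                res = subprocess.run(
                    cmd, capture_output=True, text=True,
                    env={"PATH": "/usr/bin:/usr/sbin"},
                )
            except FileNotFoundError:
                continue  # e.g. "ntpq: command not found" on these nodes
            if res.returncode == 0:
                return res.stdout
        return ""  # equivalent of the trailing "|| true"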
2026-03-09T00:25:50.858 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-09T00:25:50.861 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-09T00:25:50.861 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-09T00:25:50.864 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-09T00:25:50.866 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-09T00:25:50.869 INFO:teuthology.task.internal:Duration was 450.187328 seconds
2026-03-09T00:25:50.869 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-09T00:25:50.871 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-09T00:25:50.871 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T00:25:50.873 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T00:25:50.918 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T00:25:50.942 INFO:teuthology.orchestra.run.vm06.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T00:25:51.278 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-09T00:25:51.278 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm03.local
2026-03-09T00:25:51.278 DEBUG:teuthology.orchestra.run.vm03:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T00:25:51.308 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-09T00:25:51.309 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
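The two long grep pipelines above scan each node's captured kern.log for the first line containing BUG, INFO or DEADLOCK that is not excused by a long ignore list (an empty result means the syslog check passes). A simplified Python sketch of the same filter; the ignore patterns shown are only a small subset of the ones in the actual command:

    import re

    MATCH = re.compile(r"\bBUG\b|\bINFO\b|\bDEADLOCK\b")
    IGNORE = [
        re.compile(r"task .* blocked for more than .* seconds"),
        re.compile(r"INFO: NMI handler \(perf_event_nmi_handler\) took too long to run"),
        re.compile(r"ceph-create-keys: INFO"),
    ]

    def first_suspicious_line(path):
        # Equivalent of grep ... | grep -v ... | head -n 1 on kern.log.
        with open(path, errors="replace") as f:
            for line in f:
                if MATCH.search(line) and not any(p.search(line) for p in IGNORE):
                    return line
        return None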
2026-03-09T00:25:51.333 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-09T00:25:51.333 DEBUG:teuthology.orchestra.run.vm03:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T00:25:51.350 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T00:25:51.999 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-09T00:25:51.999 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T00:25:52.001 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T00:25:52.028 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T00:25:52.029 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T00:25:52.029 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T00:25:52.029 INFO:teuthology.orchestra.run.vm03.stderr: --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T00:25:52.030 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T00:25:52.033 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T00:25:52.033 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T00:25:52.033 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T00:25:52.033 INFO:teuthology.orchestra.run.vm06.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T00:25:52.034 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T00:25:52.190 INFO:teuthology.orchestra.run.vm03.stderr: 98.1% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T00:25:52.241 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
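After dumping journalctl, the captured syslogs are compressed with the same xargs/gzip pipeline used for the Ceph daemon logs earlier. A local Python equivalent of that compression step; the helper is illustrative, not teuthology code:

    import gzip
    import os
    import shutil
    from pathlib import Path

    def compress_syslogs(syslog_dir):
        # Local equivalent of "find ... -name '*.log' | xargs gzip -5 --verbose":
        # gzip each captured syslog at level 5 and drop the uncompressed copy.
        for path in Path(syslog_dir).rglob("*.log"):
            gz_path = Path(str(path) + ".gz")
            with open(path, "rb") as src, gzip.open(gz_path, "wb", compresslevel=5) as dst:
                shutil.copyfileobj(src, dst)
            os.remove(path)  # gzip reports this as "replaced with ...gz"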
2026-03-09T00:25:52.244 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-09T00:25:52.247 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-09T00:25:52.247 DEBUG:teuthology.orchestra.run.vm03:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T00:25:52.277 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T00:25:52.312 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-09T00:25:52.315 DEBUG:teuthology.orchestra.run.vm03:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T00:25:52.319 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T00:25:52.342 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = core
2026-03-09T00:25:52.376 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-09T00:25:52.389 DEBUG:teuthology.orchestra.run.vm03:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T00:25:52.413 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T00:25:52.413 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T00:25:52.445 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T00:25:52.445 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-09T00:25:52.448 INFO:teuthology.task.internal:Transferring archived files...
2026-03-09T00:25:52.448 DEBUG:teuthology.misc:Transferring archived files from vm03:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310/remote/vm03
2026-03-09T00:25:52.448 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T00:25:52.488 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/310/remote/vm06
2026-03-09T00:25:52.488 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T00:25:52.519 INFO:teuthology.task.internal:Removing archive directory...
2026-03-09T00:25:52.519 DEBUG:teuthology.orchestra.run.vm03:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T00:25:52.530 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T00:25:52.580 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-09T00:25:52.583 INFO:teuthology.task.internal:Not uploading archives.
2026-03-09T00:25:52.583 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
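The internal.coredump unwind earlier in this step resets kernel.core_pattern, deletes core files written by systemd-sysusers (a known false positive), and removes the coredump directory if it ends up empty; the 'test -e ... result: 1' lines confirm that no coredumps were kept on either node. A sketch of that pruning logic in Python; the helper is hypothetical and runs locally rather than over ssh:

    import os
    import subprocess

    def prune_coredumps(core_dir):
        # Drop systemd-sysusers cores, then remove the directory if nothing
        # real is left; a surviving directory is what the later "test -e"
        # probe detects (exit 0 means real coredumps were kept).
        if not os.path.isdir(core_dir):
            return False
        for name in os.listdir(core_dir):
            path = os.path.join(core_dir, name)
            # `file` is the same tool the shell loop uses to identify the binary.
            desc = subprocess.run(["file", path], capture_output=True, text=True).stdout
            if "systemd-sysusers" in desc:
                os.remove(path)
        try:
            os.rmdir(core_dir)  # like rmdir --ignore-fail-on-non-empty
        except OSError:
            pass
        return os.path.exists(core_dir)  # True: coredumps remain for the archive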
2026-03-09T00:25:52.585 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-09T00:25:52.585 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T00:25:52.587 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T00:25:52.601 INFO:teuthology.orchestra.run.vm03.stdout: 8532139 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 00:25 /home/ubuntu/cephtest
2026-03-09T00:25:52.640 INFO:teuthology.orchestra.run.vm06.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 00:25 /home/ubuntu/cephtest
2026-03-09T00:25:52.641 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-09T00:25:52.647 INFO:teuthology.run:Summary data:
description: orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain}
duration: 450.1873278617859
owner: kyr
success: true
2026-03-09T00:25:52.648 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T00:25:52.674 INFO:teuthology.run:pass
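The final 'Tidying up' step above lists anything still under /home/ubuntu/cephtest and then removes the now-empty per-job directory; a file leaked by a task would make the rmdir fail. A small illustrative Python sketch of that check, with a made-up helper name:

    import os

    def tidy_test_dir(test_dir="/home/ubuntu/cephtest"):
        # List anything a task left behind, then remove the directory,
        # mirroring the "find ... -ls ; rmdir ..." pair in the log.
        leftovers = []
        for root, _dirs, files in os.walk(test_dir):
            leftovers.extend(os.path.join(root, f) for f in files)
        os.rmdir(test_dir)  # raises OSError if any task left files behind
        return leftovers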