2026-03-09T14:54:08.975 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T14:54:08.983 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T14:54:09.017 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516
branch: squid
description: orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain}
email: null
first_in_suite: false
flavor: default
job_id: '516'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
      - \(MDS_ALL_DOWN\)
      - \(MDS_UP_LESS_THAN_MAX\)
    log-only-match:
      - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
        - python3-xmltodict
        - python3-jmespath
      rpm:
        - bzip2
        - perl-Test-Harness
        - python3-xmltodict
        - python3-jmespath
  selinux:
    allowlist:
      - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - client.0
- - host.b
  - cephadm.exclude
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBr81p8q6zcoUVXehjbBTUVdxaqCcSb3J6mPEBzllszA1aYGSJr0Nt+XKoWiBE/kKzHBKHyFBYYzSVCDqGtXjQg=
  vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBl46eirWzlxBe6TlEv1XvoS6RBn4SJFK+ArifhcTLzsQyL3WQK640aa9w7i2QfduzA1JB+Qqv2Q1bvsKnMdBOc=
tasks:
- pexec:
    all:
      - sudo dnf remove nvme-cli -y
      - sudo dnf install runc nvmetcli nvme-cli -y
      - sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
      - sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
- cephadm.deploy_samba_ad_dc:
    role: host.b
- cephadm: null
- cephadm.shell:
    host.a:
      - ceph fs volume create cephfs
- cephadm.wait_for_service:
    service: mds.cephfs
- cephadm.shell:
    host.a:
      - cmd: ceph fs subvolumegroup create cephfs g1
      - cmd: ceph fs subvolume create cephfs sub1 --group-name=g1 --mode=0777
      - cmd: ceph fs authorize cephfs client.smbdata / rw
      - cmd: ceph osd pool create .smb --yes-i-really-mean-it
      - cmd: ceph osd pool application enable .smb smb
      - cmd: rados --pool=.smb --namespace=admem1 put conf.toml /dev/stdin
        stdin: 'samba-container-config = "v0"
          [configs.admem1]
          shares = ["share1"]
          globals = ["default", "domain"]
          instance_name = "SAMBA"
          [shares.share1.options]
          "vfs objects" = "ceph"
          path = "/"
          "ceph:config_file" = "/etc/ceph/ceph.conf"
          "ceph:user_id" = "smbdata"
          "kernel share modes" = "no"
          "read only" = "no"
          "browseable" = "yes"
          [globals.default.options]
          "server min protocol" = "SMB2"
          "load printers" = "no"
          "printing" = "bsd"
          "printcap name" = "/dev/null"
          "disable spoolss" = "yes"
          "guest ok" = "no"
          [globals.domain.options]
          security = "ads"
          workgroup = "DOMAIN1"
          realm = "domain1.sink.test"
          "idmap config * : backend" = "autorid"
          "idmap config * : range" = "2000-9999999"
          '
      - cmd: ceph config-key set smb/config/admem1/join1.json -i -
        stdin: '{"username": "Administrator", "password": "Passw0rd"}
          '
- cephadm.apply:
    specs:
      - cluster_id: admem1
        config_uri: rados://.smb/admem1/conf.toml
        custom_dns:
          - '{{ctx.samba_ad_dc_ip}}'
        features:
          - domain
        include_ceph_users:
          - client.smbdata
        join_sources:
          - rados:mon-config-key:smb/config/admem1/join1.json
        placement:
          count: 1
        service_id: admem1
        service_type: smb
- cephadm.wait_for_service:
    service: smb.admem1
- cephadm.exec:
    host.b:
      - sleep 30
      - '{{ctx.samba_client_container_cmd|join('' '')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{''host.a''|role_to_remote|attr(''ip_address'')}}/share1 -c ls'
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-09T14:54:09.017 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T14:54:09.018 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T14:54:09.018 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T14:54:09.018 INFO:teuthology.task.internal:Checking packages...
2026-03-09T14:54:09.018 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T14:54:09.018 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T14:54:09.018 INFO:teuthology.packaging:ref: None
2026-03-09T14:54:09.018 INFO:teuthology.packaging:tag: None
2026-03-09T14:54:09.018 INFO:teuthology.packaging:branch: squid
2026-03-09T14:54:09.018 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T14:54:09.018 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-09T14:54:09.786 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-09T14:54:09.788 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T14:54:09.788 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T14:54:09.788 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T14:54:09.789 INFO:teuthology.task.internal:Saving configuration
2026-03-09T14:54:09.794 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T14:54:09.795 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-09T14:54:09.801 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 14:52:55.237414', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBr81p8q6zcoUVXehjbBTUVdxaqCcSb3J6mPEBzllszA1aYGSJr0Nt+XKoWiBE/kKzHBKHyFBYYzSVCDqGtXjQg='}
2026-03-09T14:54:09.807 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 14:52:55.238110', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBl46eirWzlxBe6TlEv1XvoS6RBn4SJFK+ArifhcTLzsQyL3WQK640aa9w7i2QfduzA1JB+Qqv2Q1bvsKnMdBOc='}
2026-03-09T14:54:09.807 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-09T14:54:09.808 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
2026-03-09T14:54:09.808 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['host.b', 'cephadm.exclude']
2026-03-09T14:54:09.808 INFO:teuthology.run_tasks:Running task console_log...
2026-03-09T14:54:09.814 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding
2026-03-09T14:54:09.819 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding
2026-03-09T14:54:09.819 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f63ac686170>, signals=[15])
2026-03-09T14:54:09.819 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-09T14:54:09.820 INFO:teuthology.task.internal:Opening connections...
2026-03-09T14:54:09.820 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local
2026-03-09T14:54:09.820 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T14:54:09.879 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local
2026-03-09T14:54:09.879 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T14:54:09.940 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-09T14:54:09.941 DEBUG:teuthology.orchestra.run.vm03:> uname -m
2026-03-09T14:54:09.990 INFO:teuthology.orchestra.run.vm03.stdout:x86_64
2026-03-09T14:54:09.990 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T14:54:10.044 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T14:54:10.045 INFO:teuthology.lock.ops:Updating vm03.local on lock server
2026-03-09T14:54:10.049 DEBUG:teuthology.orchestra.run.vm04:> uname -m
2026-03-09T14:54:10.063 INFO:teuthology.orchestra.run.vm04.stdout:x86_64
2026-03-09T14:54:10.063 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:NAME="CentOS Stream"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="9"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:ID="centos"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE="rhel fedora"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="9"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:PLATFORM_ID="platform:el9"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:ANSI_COLOR="0;31"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:LOGO="fedora-logo-icon"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://centos.org/"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T14:54:10.119 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T14:54:10.119 INFO:teuthology.lock.ops:Updating vm04.local on lock server
2026-03-09T14:54:10.124 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-09T14:54:10.126 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-09T14:54:10.127 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-09T14:54:10.127 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest
2026-03-09T14:54:10.129 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest
2026-03-09T14:54:10.174 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-09T14:54:10.175 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-09T14:54:10.175 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph)
2026-03-09T14:54:10.184 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph)
2026-03-09T14:54:10.197 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T14:54:10.229 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T14:54:10.229 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-09T14:54:10.237 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready
2026-03-09T14:54:10.252 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T14:54:10.436 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready
2026-03-09T14:54:10.451 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T14:54:10.635 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-09T14:54:10.637 INFO:teuthology.task.internal:Creating test directory...
2026-03-09T14:54:10.637 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T14:54:10.639 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T14:54:10.653 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-09T14:54:10.654 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-09T14:54:10.655 INFO:teuthology.task.internal:Creating archive directory...
2026-03-09T14:54:10.655 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T14:54:10.694 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T14:54:10.713 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-09T14:54:10.714 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-09T14:54:10.714 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T14:54:10.764 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T14:54:10.764 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T14:54:10.780 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T14:54:10.780 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T14:54:10.807 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T14:54:10.831 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T14:54:10.842 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T14:54:10.850 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T14:54:10.860 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T14:54:10.861 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-09T14:54:10.863 INFO:teuthology.task.internal:Configuring sudo...
2026-03-09T14:54:10.863 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T14:54:10.886 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T14:54:10.928 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-09T14:54:10.930 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-09T14:54:10.930 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T14:54:10.953 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T14:54:10.984 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T14:54:11.032 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T14:54:11.088 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-09T14:54:11.088 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T14:54:11.151 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T14:54:11.174 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T14:54:11.232 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-03-09T14:54:11.233 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T14:54:11.291 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart
2026-03-09T14:54:11.293 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart
2026-03-09T14:54:11.318 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T14:54:11.359 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T14:54:11.722 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-09T14:54:11.724 INFO:teuthology.task.internal:Starting timer...
2026-03-09T14:54:11.724 INFO:teuthology.run_tasks:Running task pcp...
2026-03-09T14:54:11.727 INFO:teuthology.run_tasks:Running task selinux...
2026-03-09T14:54:11.729 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0']}
2026-03-09T14:54:11.729 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported
2026-03-09T14:54:11.729 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported
2026-03-09T14:54:11.729 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-09T14:54:11.729 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-09T14:54:11.729 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-09T14:54:11.729 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-09T14:54:11.730 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-09T14:54:11.731 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-09T14:54:11.732 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-09T14:54:12.367 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-09T14:54:12.372 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-09T14:54:12.373 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryj6tdpmhq --limit vm03.local,vm04.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-09T14:57:34.264 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm03.local'), Remote(name='ubuntu@vm04.local')]
2026-03-09T14:57:34.265 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local'
2026-03-09T14:57:34.265 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T14:57:34.327 DEBUG:teuthology.orchestra.run.vm03:> true
2026-03-09T14:57:34.406 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local'
2026-03-09T14:57:34.406 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm04.local'
2026-03-09T14:57:34.406 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T14:57:34.475 DEBUG:teuthology.orchestra.run.vm04:> true
2026-03-09T14:57:34.551 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm04.local'
2026-03-09T14:57:34.551 INFO:teuthology.run_tasks:Running task clock...
2026-03-09T14:57:34.553 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-09T14:57:34.554 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T14:57:34.554 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T14:57:34.556 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T14:57:34.556 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T14:57:34.582 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-09T14:57:34.593 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-09T14:57:34.613 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found
2026-03-09T14:57:34.621 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon
2026-03-09T14:57:34.631 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-09T14:57:34.633 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-09T14:57:34.643 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-09T14:57:34.653 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-09T14:57:34.689 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-09T14:57:34.691 INFO:teuthology.orchestra.run.vm04.stderr:sudo: ntpd: command not found
2026-03-09T14:57:34.691 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T14:57:34.691 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-09T14:57:34.708 INFO:teuthology.orchestra.run.vm04.stdout:506 Cannot talk to daemon
2026-03-09T14:57:34.725 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-09T14:57:34.743 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-09T14:57:34.800 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found
2026-03-09T14:57:34.802 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T14:57:34.802 INFO:teuthology.orchestra.run.vm04.stdout:===============================================================================
2026-03-09T14:57:34.803 INFO:teuthology.run_tasks:Running task pexec...
2026-03-09T14:57:34.805 INFO:teuthology.task.pexec:Executing custom commands...
2026-03-09T14:57:34.806 DEBUG:teuthology.orchestra.run.vm03:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-09T14:57:34.806 DEBUG:teuthology.orchestra.run.vm04:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-09T14:57:34.807 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf remove nvme-cli -y
2026-03-09T14:57:34.807 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf install runc nvmetcli nvme-cli -y
2026-03-09T14:57:34.807 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.808 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.808 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm03.local
2026-03-09T14:57:34.808 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-09T14:57:34.808 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y
2026-03-09T14:57:34.808 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.808 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.847 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo dnf remove nvme-cli -y
2026-03-09T14:57:34.847 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo dnf install runc nvmetcli nvme-cli -y
2026-03-09T14:57:34.847 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.847 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.847 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm04.local
2026-03-09T14:57:34.847 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-09T14:57:34.847 INFO:teuthology.task.pexec:sudo dnf install runc nvmetcli nvme-cli -y
2026-03-09T14:57:34.847 INFO:teuthology.task.pexec:sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.847 INFO:teuthology.task.pexec:sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
2026-03-09T14:57:34.997 INFO:teuthology.orchestra.run.vm03.stdout:No match for argument: nvme-cli
2026-03-09T14:57:34.997 INFO:teuthology.orchestra.run.vm03.stderr:No packages marked for removal.
2026-03-09T14:57:35.000 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved.
2026-03-09T14:57:35.001 INFO:teuthology.orchestra.run.vm03.stdout:Nothing to do.
2026-03-09T14:57:35.001 INFO:teuthology.orchestra.run.vm03.stdout:Complete!
2026-03-09T14:57:35.090 INFO:teuthology.orchestra.run.vm04.stdout:No match for argument: nvme-cli
2026-03-09T14:57:35.090 INFO:teuthology.orchestra.run.vm04.stderr:No packages marked for removal.
2026-03-09T14:57:35.093 INFO:teuthology.orchestra.run.vm04.stdout:Dependencies resolved.
2026-03-09T14:57:35.093 INFO:teuthology.orchestra.run.vm04.stdout:Nothing to do.
2026-03-09T14:57:35.093 INFO:teuthology.orchestra.run.vm04.stdout:Complete!
2026-03-09T14:57:35.392 INFO:teuthology.orchestra.run.vm03.stdout:Last metadata expiration check: 0:02:23 ago on Mon 09 Mar 2026 02:55:12 PM UTC.
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved.
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout: Package Arch Version Repository Size
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout:Installing:
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-09T14:57:35.493 INFO:teuthology.orchestra.run.vm03.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout: runc x86_64 4:1.4.0-2.el9 appstream 4.0 M
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:Installing dependencies:
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:Transaction Summary
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:Install 7 Packages
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:Total download size: 6.3 M
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:Installed size: 24 M
2026-03-09T14:57:35.494 INFO:teuthology.orchestra.run.vm03.stdout:Downloading Packages:
2026-03-09T14:57:35.555 INFO:teuthology.orchestra.run.vm04.stdout:Last metadata expiration check: 0:02:45 ago on Mon 09 Mar 2026 02:54:50 PM UTC.
2026-03-09T14:57:35.694 INFO:teuthology.orchestra.run.vm04.stdout:Dependencies resolved.
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: Package Arch Version Repository Size
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Installing:
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: runc x86_64 4:1.4.0-2.el9 appstream 4.0 M
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Installing dependencies:
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Transaction Summary
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Install 7 Packages
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Total download size: 6.3 M
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Installed size: 24 M
2026-03-09T14:57:35.695 INFO:teuthology.orchestra.run.vm04.stdout:Downloading Packages:
2026-03-09T14:57:36.531 INFO:teuthology.orchestra.run.vm04.stdout:(1/7): nvmetcli-0.8-3.el9.noarch.rpm 256 kB/s | 44 kB 00:00
2026-03-09T14:57:36.533 INFO:teuthology.orchestra.run.vm04.stdout:(2/7): python3-configshell-1.1.30-1.el9.noarch. 415 kB/s | 72 kB 00:00
2026-03-09T14:57:36.615 INFO:teuthology.orchestra.run.vm03.stdout:(1/7): nvmetcli-0.8-3.el9.noarch.rpm 93 kB/s | 44 kB 00:00
2026-03-09T14:57:36.667 INFO:teuthology.orchestra.run.vm03.stdout:(2/7): python3-configshell-1.1.30-1.el9.noarch. 138 kB/s | 72 kB 00:00
2026-03-09T14:57:36.669 INFO:teuthology.orchestra.run.vm04.stdout:(3/7): nvme-cli-2.16-1.el9.x86_64.rpm 3.7 MB/s | 1.2 MB 00:00
2026-03-09T14:57:36.683 INFO:teuthology.orchestra.run.vm04.stdout:(4/7): python3-kmod-0.9-32.el9.x86_64.rpm 555 kB/s | 84 kB 00:00
2026-03-09T14:57:36.695 INFO:teuthology.orchestra.run.vm04.stdout:(5/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 928 kB/s | 150 kB 00:00
2026-03-09T14:57:36.750 INFO:teuthology.orchestra.run.vm04.stdout:(6/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 10 MB/s | 837 kB 00:00
2026-03-09T14:57:36.843 INFO:teuthology.orchestra.run.vm03.stdout:(3/7): python3-kmod-0.9-32.el9.x86_64.rpm 370 kB/s | 84 kB 00:00
2026-03-09T14:57:37.042 INFO:teuthology.orchestra.run.vm03.stdout:(4/7): python3-urwid-2.1.2-4.el9.x86_64.rpm 4.1 MB/s | 837 kB 00:00
2026-03-09T14:57:37.083 INFO:teuthology.orchestra.run.vm03.stdout:(5/7): python3-pyparsing-2.4.7-9.el9.noarch.rpm 362 kB/s | 150 kB 00:00
2026-03-09T14:57:37.201 INFO:teuthology.orchestra.run.vm04.stdout:(7/7): runc-1.4.0-2.el9.x86_64.rpm 7.7 MB/s | 4.0 MB 00:00
2026-03-09T14:57:37.201 INFO:teuthology.orchestra.run.vm04.stdout:--------------------------------------------------------------------------------
2026-03-09T14:57:37.201 INFO:teuthology.orchestra.run.vm04.stdout:Total 4.2 MB/s | 6.3 MB 00:01
2026-03-09T14:57:37.286 INFO:teuthology.orchestra.run.vm03.stdout:(6/7): runc-1.4.0-2.el9.x86_64.rpm 16 MB/s | 4.0 MB 00:00
2026-03-09T14:57:37.301 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction check
2026-03-09T14:57:37.317 INFO:teuthology.orchestra.run.vm04.stdout:Transaction check succeeded.
2026-03-09T14:57:37.317 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction test
2026-03-09T14:57:37.401 INFO:teuthology.orchestra.run.vm04.stdout:Transaction test succeeded.
2026-03-09T14:57:37.401 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction
2026-03-09T14:57:37.614 INFO:teuthology.orchestra.run.vm04.stdout: Preparing : 1/1
2026-03-09T14:57:37.628 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7
2026-03-09T14:57:37.644 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7
2026-03-09T14:57:37.656 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-09T14:57:37.663 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-09T14:57:37.665 INFO:teuthology.orchestra.run.vm04.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7
2026-03-09T14:57:37.736 INFO:teuthology.orchestra.run.vm04.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7
2026-03-09T14:57:37.903 INFO:teuthology.orchestra.run.vm03.stdout:(7/7): nvme-cli-2.16-1.el9.x86_64.rpm 671 kB/s | 1.2 MB 00:01
2026-03-09T14:57:37.904 INFO:teuthology.orchestra.run.vm03.stdout:--------------------------------------------------------------------------------
2026-03-09T14:57:37.904 INFO:teuthology.orchestra.run.vm03.stdout:Total 2.6 MB/s | 6.3 MB 00:02
2026-03-09T14:57:37.920 INFO:teuthology.orchestra.run.vm04.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7
2026-03-09T14:57:37.925 INFO:teuthology.orchestra.run.vm04.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-09T14:57:37.984 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction check
2026-03-09T14:57:37.994 INFO:teuthology.orchestra.run.vm03.stdout:Transaction check succeeded.
2026-03-09T14:57:37.994 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction test
2026-03-09T14:57:38.059 INFO:teuthology.orchestra.run.vm03.stdout:Transaction test succeeded.
2026-03-09T14:57:38.059 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction
2026-03-09T14:57:38.229 INFO:teuthology.orchestra.run.vm03.stdout: Preparing : 1/1
2026-03-09T14:57:38.240 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/7
2026-03-09T14:57:38.250 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/7
2026-03-09T14:57:38.259 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-09T14:57:38.266 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-09T14:57:38.268 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/7
2026-03-09T14:57:38.317 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/7
2026-03-09T14:57:38.358 INFO:teuthology.orchestra.run.vm04.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-09T14:57:38.358 INFO:teuthology.orchestra.run.vm04.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-09T14:57:38.358 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:38.449 INFO:teuthology.orchestra.run.vm03.stdout: Installing : runc-4:1.4.0-2.el9.x86_64 6/7
2026-03-09T14:57:38.453 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-09T14:57:38.790 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 7/7
2026-03-09T14:57:38.790 INFO:teuthology.orchestra.run.vm03.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-09T14:57:38.790 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-09T14:57:39.026 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7
2026-03-09T14:57:39.027 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7
2026-03-09T14:57:39.027 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-09T14:57:39.027 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-09T14:57:39.027 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7
2026-03-09T14:57:39.027 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7
2026-03-09T14:57:39.119 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7
2026-03-09T14:57:39.119 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:39.119 INFO:teuthology.orchestra.run.vm04.stdout:Installed:
2026-03-09T14:57:39.120 INFO:teuthology.orchestra.run.vm04.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-09T14:57:39.120 INFO:teuthology.orchestra.run.vm04.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-09T14:57:39.120 INFO:teuthology.orchestra.run.vm04.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-09T14:57:39.120 INFO:teuthology.orchestra.run.vm04.stdout: runc-4:1.4.0-2.el9.x86_64
2026-03-09T14:57:39.120 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:39.120 INFO:teuthology.orchestra.run.vm04.stdout:Complete!
2026-03-09T14:57:39.204 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/7
2026-03-09T14:57:39.204 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/7
2026-03-09T14:57:39.204 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/7
2026-03-09T14:57:39.204 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/7
2026-03-09T14:57:39.204 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/7
2026-03-09T14:57:39.204 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/7
2026-03-09T14:57:39.249 DEBUG:teuthology.parallel:result is None
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : runc-4:1.4.0-2.el9.x86_64 7/7
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout:Installed:
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout: runc-4:1.4.0-2.el9.x86_64
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-09T14:57:39.265 INFO:teuthology.orchestra.run.vm03.stdout:Complete!
2026-03-09T14:57:39.338 DEBUG:teuthology.parallel:result is None
2026-03-09T14:57:39.338 INFO:teuthology.run_tasks:Running task cephadm.deploy_samba_ad_dc...
2026-03-09T14:57:39.389 INFO:tasks.cephadm:Testing if podman is available
2026-03-09T14:57:39.389 DEBUG:teuthology.orchestra.run.vm04:> sudo podman --help
2026-03-09T14:57:39.462 INFO:teuthology.orchestra.run.vm04.stdout:Manage pods, containers and images
2026-03-09T14:57:39.462 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:39.462 INFO:teuthology.orchestra.run.vm04.stdout:Usage:
2026-03-09T14:57:39.462 INFO:teuthology.orchestra.run.vm04.stdout: podman [options] [command]
2026-03-09T14:57:39.462 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout:Available Commands:
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: artifact Manage OCI artifacts
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: attach Attach to a running container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: auto-update Auto update containers according to their auto-update policy
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: build Build an image using instructions from Containerfiles
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: commit Create new image based on the changed container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: compose Run compose workloads via an external provider such as docker-compose or podman-compose
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: container Manage containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: cp Copy files/folders between a container and the local filesystem
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: create Create but do not start a container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: diff Display the changes to the object's file system
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: events Show podman system events
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: exec Run a process in a running container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: export Export container's filesystem contents as a tar archive
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: farm Farm out builds to remote machines
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: generate Generate structured data based on containers, pods or volumes
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: healthcheck Manage health checks on containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: help Help about any command
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: history Show history of a specified image
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: image Manage images
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: images List images in local storage
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: import Import a tarball to create a filesystem image
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: info Display podman system information
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: init Initialize one or more containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: inspect Display the configuration of object denoted by ID
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: kill Kill one or more running containers with a specific signal
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: kube Play containers, pods or volumes from a structured file
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: load Load image(s) from a tar archive
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: login Log in to a container registry
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: logout Log out of a container registry
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: logs Fetch the logs of one or more containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: machine Manage a virtual machine
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: manifest Manipulate manifest lists and image indexes
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: mount Mount a working container's root filesystem
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: network Manage networks
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: pause Pause all the processes in one or more containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: pod Manage pods
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: port List port mappings or a specific mapping for the container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: ps List containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: pull Pull an image from a registry
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: push Push an image to a specified destination
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: quadlet Allows users to manage Quadlets
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: rename Rename an existing container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: restart Restart one or more containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: rm Remove one or more containers
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: rmi Remove one or more images from local storage
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: run Run a command in a new container
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: save Save image(s) to an archive
2026-03-09T14:57:39.463 INFO:teuthology.orchestra.run.vm04.stdout: search Search registry for image
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: secret Manage secrets
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: start Start one or more containers
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: stats Display a live stream of container resource usage statistics
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: stop Stop one or more containers
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: system Manage podman
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: tag Add an additional name to a local image
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: top Display the running processes of a container
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: unmount Unmount working container's root filesystem
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: unpause Unpause the processes in one or more containers
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: unshare Run a command in a modified user namespace
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: untag Remove a name from a local image
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: update Update an existing container
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: version Display the Podman version information
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: volume Manage volumes
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: wait Block on one or more containers
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout:Options:
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --cdi-spec-dir stringArray Set the CDI spec directory path (may be set multiple times) (default [/etc/cdi,/var/run/cdi])
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --cgroup-manager string Cgroup manager to use ("cgroupfs"|"systemd") (default "systemd")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --config string Path to directory containing authentication config file
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --conmon string Path of the conmon binary
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: -c, --connection string Connection to use for remote Podman service (CONTAINER_CONNECTION)
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --events-backend string Events backend to use ("file"|"journald"|"none") (default "journald")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --help Help for podman
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --hooks-dir stringArray Set the OCI hooks directory path (may be set multiple times) (default [/usr/share/containers/oci/hooks.d])
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --identity string path to SSH identity file, (CONTAINER_SSHKEY)
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --imagestore string Path to the 'image store', different from 'graph root', use this to split storing the image into a separate 'image store', see 'man containers-storage.conf' for details
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --log-level string Log messages above specified level (trace, debug, info, warn, warning, error, fatal, panic) (default "warn")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --module stringArray Load the containers.conf(5) module
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --network-cmd-path string Path to the command for configuring the network
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --network-config-dir string Path of the configuration directory for networks
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --out string Send output (stdout) from podman to a file
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: -r, --remote Access remote Podman service
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --root string Path to the graph root directory where images, containers, etc. are stored
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --runroot string Path to the 'run directory' where all state information is stored
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --runtime string Path to the OCI-compatible binary used to run containers. (default "runc")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --runtime-flag stringArray add global flags for the container runtime
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --ssh string define the ssh mode (default "golang")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --storage-driver string Select which storage driver is used to manage storage of images and containers
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --storage-opt stringArray Used to pass an option to the storage driver
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --syslog Output podman-internal logs to syslog as well as the console (default false)
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --tls-ca string path to TLS certificate Authority PEM file for remote.
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --tls-cert string path to TLS client certificate PEM file for remote.
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --tls-key string path to TLS client certificate private key PEM file for remote.
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --tmpdir string Path to the tmp directory for libpod state content.
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: Note: use the environment variable 'TMPDIR' to change the temporary storage location for container images, '/var/tmp'.
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: (default "/run/libpod")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --transient-store Enable transient container storage
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --url string URL to access Podman service (CONTAINER_HOST) (default "unix:///run/podman/podman.sock")
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: -v, --version version for podman
2026-03-09T14:57:39.464 INFO:teuthology.orchestra.run.vm04.stdout: --volumepath string Path to the volume directory in which volume data is stored
2026-03-09T14:57:39.468 DEBUG:teuthology.orchestra.run.vm04:> sudo podman pull quay.io/samba.org/samba-ad-server:latest
2026-03-09T14:57:39.563 INFO:teuthology.orchestra.run.vm04.stderr:Trying to pull quay.io/samba.org/samba-ad-server:latest...
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Getting image source signatures
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:62a1f31ed0bb768484d676b5f4199490a0ef7149e0d33ef5020f1da5334c40df
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:c0920deb4092ae59f2370126b40d0ac9196853983118586d2d2f6e347ef1d845
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:708792c66f64961ac2acf26b515f9a1ead2f27a8d2478a21e82da9bb485205d3
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:cd7ba9a7bc37ac1a55979cd5f3d20903c275d251cf38ceb51cacff1f5f96ae72
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:001ffc0a1d8385a856651332438933f37dcd1413b095ec35d55b737a52e0a704
2026-03-09T14:57:40.674 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:32f46c1320dbd4aa5db0c3c7eb552b73f601eee32b3971afa9315a7a5bc99d4b
2026-03-09T14:57:58.866 INFO:teuthology.orchestra.run.vm04.stderr:Copying config sha256:4713b105ffd2255e32e8b9dec6c5e94988a5706fac59a2989f19b1b472c3c536
2026-03-09T14:57:58.869 INFO:teuthology.orchestra.run.vm04.stderr:Writing manifest to image destination
2026-03-09T14:57:58.891 INFO:teuthology.orchestra.run.vm04.stdout:4713b105ffd2255e32e8b9dec6c5e94988a5706fac59a2989f19b1b472c3c536
2026-03-09T14:57:58.898 DEBUG:teuthology.orchestra.run.vm04:> sudo podman pull quay.io/samba.org/samba-client:latest
2026-03-09T14:57:58.954 INFO:teuthology.orchestra.run.vm04.stderr:Trying to pull quay.io/samba.org/samba-client:latest...
2026-03-09T14:58:00.185 INFO:teuthology.orchestra.run.vm04.stderr:Getting image source signatures
2026-03-09T14:58:00.185 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:ebf30006f6881eb69abb9d2503d1076468657e1c29d69b17215175c43ffad511
2026-03-09T14:58:00.185 INFO:teuthology.orchestra.run.vm04.stderr:Copying blob sha256:62a1f31ed0bb768484d676b5f4199490a0ef7149e0d33ef5020f1da5334c40df
2026-03-09T14:58:08.339 INFO:teuthology.orchestra.run.vm04.stderr:Copying config sha256:c060b5405e2be405765eab4bc4b30f141a78581b7e1c59fecadfe4c356f5fc66
2026-03-09T14:58:08.341 INFO:teuthology.orchestra.run.vm04.stderr:Writing manifest to image destination
2026-03-09T14:58:08.353 INFO:teuthology.orchestra.run.vm04.stdout:c060b5405e2be405765eab4bc4b30f141a78581b7e1c59fecadfe4c356f5fc66
2026-03-09T14:58:08.361 DEBUG:teuthology.orchestra.run.vm04:> ss -lunH
2026-03-09T14:58:08.390 INFO:teuthology.orchestra.run.vm04.stdout:UNCONN 0 0 0.0.0.0:111 0.0.0.0:*
2026-03-09T14:58:08.390 INFO:teuthology.orchestra.run.vm04.stdout:UNCONN 0 0 127.0.0.1:323 0.0.0.0:*
2026-03-09T14:58:08.390 INFO:teuthology.orchestra.run.vm04.stdout:UNCONN 0 0 [::]:111 [::]:*
2026-03-09T14:58:08.390 INFO:teuthology.orchestra.run.vm04.stdout:UNCONN 0 0 [::1]:323 [::]:*
2026-03-09T14:58:08.390 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /var/lib/samba/container/logs /var/lib/samba/container/data
2026-03-09T14:58:08.464 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /var/tmp/samba
2026-03-09T14:58:08.533 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-03-09T14:58:08.533 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/var/tmp/samba/container.json
2026-03-09T14:58:08.608 DEBUG:teuthology.orchestra.run.vm04:> sudo podman run -d --name=samba-ad --network=host --privileged --volume=/var/tmp/samba:/etc/samba-container:ro -eSAMBACC_CONFIG=/etc/samba-container/container.json quay.io/samba.org/samba-ad-server:latest
2026-03-09T14:58:08.792 INFO:teuthology.orchestra.run.vm04.stdout:813869eb5419a1311a4920f8b8c342ec7e0c00708909fa16d58d84bb1092b1e6 2026-03-09T14:58:09.046 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.104, idx=0 2026-03-09T14:58:09.047 DEBUG:teuthology.orchestra.run.vm04:> sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T14:58:09.294 INFO:teuthology.orchestra.run.vm04.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T14:58:09.425 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T14:58:09.925 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.104, idx=1 2026-03-09T14:58:09.925 DEBUG:teuthology.orchestra.run.vm04:> sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T14:58:10.275 INFO:teuthology.orchestra.run.vm04.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T14:58:10.419 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T14:58:11.420 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.104, idx=2 2026-03-09T14:58:11.420 DEBUG:teuthology.orchestra.run.vm04:> sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T14:58:11.639 INFO:teuthology.orchestra.run.vm04.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T14:58:11.772 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T14:58:13.774 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.104, idx=3 2026-03-09T14:58:13.774 DEBUG:teuthology.orchestra.run.vm04:> sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T14:58:14.091 INFO:teuthology.orchestra.run.vm04.stderr:do_connect: Connection to domain1.sink.test failed (Error NT_STATUS_UNSUCCESSFUL) 2026-03-09T14:58:14.223 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T14:58:18.224 INFO:tasks.cephadm:Probing SMB status of DC 192.168.123.104, idx=4 2026-03-09T14:58:18.224 DEBUG:teuthology.orchestra.run.vm04:> sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //domain1.sink.test/sysvol -c ls 2026-03-09T14:58:18.453 INFO:teuthology.orchestra.run.vm04.stdout: . D 0 Mon Mar 9 14:58:10 2026 2026-03-09T14:58:18.453 INFO:teuthology.orchestra.run.vm04.stdout: .. D 0 Mon Mar 9 14:58:10 2026 2026-03-09T14:58:18.453 INFO:teuthology.orchestra.run.vm04.stdout: domain1.sink.test D 0 Mon Mar 9 14:58:10 2026 2026-03-09T14:58:18.453 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T14:58:18.453 INFO:teuthology.orchestra.run.vm04.stdout: 41876460 blocks of size 1024. 38732200 blocks available 2026-03-09T14:58:18.612 INFO:tasks.cephadm:SMB status probe succeeded 2026-03-09T14:58:18.612 INFO:teuthology.run_tasks:Running task cephadm... 
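The probe sequence above retries smbclient (run from the samba-client container) against the sysvol share of the new DC, sleeping longer after each failure until the listing succeeds; a rough standalone equivalent of that readiness loop, not the teuthology task itself, is:

    import subprocess
    import time

    def wait_for_dc(dc_ip, share="//domain1.sink.test/sysvol",
                    user=r"DOMAIN1\ckent%1115Rose.", tries=10, delay=0.5):
        # Probe the DC by listing sysvol via the samba-client container,
        # doubling the sleep between failed attempts (roughly the cadence
        # visible in the log: ~0.5s, 1s, 2s, 4s).
        cmd = [
            "sudo", "podman", "run", "--rm", "--net=host",
            f"--dns={dc_ip}", "-eKRB5_CONFIG=/dev/null",
            "quay.io/samba.org/samba-client:latest",
            "smbclient", "-U", user, share, "-c", "ls",
        ]
        for _ in range(tries):
            if subprocess.run(cmd).returncode == 0:
                return True
            time.sleep(delay)
            delay *= 2
        return False

    if not wait_for_dc("192.168.123.104"):
        raise RuntimeError("DC never became reachable over SMB")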
2026-03-09T14:58:18.614 INFO:tasks.cephadm:Config: {'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-09T14:58:18.614 INFO:tasks.cephadm:Cluster image is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T14:58:18.614 INFO:tasks.cephadm:Cluster fsid is 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:58:18.615 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-09T14:58:18.615 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.103'} 2026-03-09T14:58:18.615 INFO:tasks.cephadm:First mon is mon.a on vm03 2026-03-09T14:58:18.615 INFO:tasks.cephadm:First mgr is x 2026-03-09T14:58:18.615 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-09T14:58:18.615 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s) 2026-03-09T14:58:18.644 DEBUG:teuthology.orchestra.run.vm04:> sudo hostname $(hostname -s) 2026-03-09T14:58:18.685 INFO:tasks.cephadm:Downloading "compiled" cephadm from cachra 2026-03-09T14:58:18.685 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T14:58:19.312 INFO:tasks.cephadm:builder_project result: [{'url': 'https://3.chacra.ceph.com/r/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'chacra_url': 'https://3.chacra.ceph.com/repos/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/flavors/default/', 'ref': 'squid', 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df', 'distro': 'centos', 'distro_version': '9', 'distro_codename': None, 'modified': '2026-02-25 18:55:15.146628', 'status': 'ready', 'flavor': 'default', 'project': 'ceph', 'archs': ['source', 'x86_64'], 'extra': {'version': '19.2.3-678-ge911bdeb', 'package_manager_version': '19.2.3-678.ge911bdeb', 'build_url': 'https://jenkins.ceph.com/job/ceph-dev-pipeline/3275/', 'root_build_cause': '', 'node_name': '10.20.192.26+soko16', 'job_name': 'ceph-dev-pipeline'}}] 2026-03-09T14:58:19.910 INFO:tasks.util.chacra:got chacra host 3.chacra.ceph.com, ref squid, sha1 e911bdebe5c8faa3800735d1568fcdca65db60df from https://shaman.ceph.com/api/search/?project=ceph&distros=centos%2F9%2Fx86_64&flavor=default&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T14:58:19.911 INFO:tasks.cephadm:Discovered cachra url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-09T14:58:19.911 INFO:tasks.cephadm:Downloading cephadm from url: https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm 2026-03-09T14:58:19.911 DEBUG:teuthology.orchestra.run.vm03:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T14:58:21.318 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 
1 ubuntu ubuntu 788355 Mar 9 14:58 /home/ubuntu/cephtest/cephadm 2026-03-09T14:58:21.318 DEBUG:teuthology.orchestra.run.vm04:> curl --silent -L https://3.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/centos/9/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T14:58:22.691 INFO:teuthology.orchestra.run.vm04.stdout:-rw-r--r--. 1 ubuntu ubuntu 788355 Mar 9 14:58 /home/ubuntu/cephtest/cephadm 2026-03-09T14:58:22.691 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T14:58:22.712 DEBUG:teuthology.orchestra.run.vm04:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T14:58:22.737 INFO:tasks.cephadm:Pulling image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on all hosts... 2026-03-09T14:58:22.737 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-09T14:58:22.754 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull 2026-03-09T14:58:22.915 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T14:58:22.916 INFO:teuthology.orchestra.run.vm04.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout: "repo_digests": [ 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-03-09T14:59:42.352 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T14:59:51.668 INFO:teuthology.orchestra.run.vm03.stdout:{ 2026-03-09T14:59:51.669 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)", 2026-03-09T14:59:51.669 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", 2026-03-09T14:59:51.669 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [ 2026-03-09T14:59:51.669 INFO:teuthology.orchestra.run.vm03.stdout: "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc" 2026-03-09T14:59:51.669 INFO:teuthology.orchestra.run.vm03.stdout: ] 2026-03-09T14:59:51.669 INFO:teuthology.orchestra.run.vm03.stdout:} 2026-03-09T14:59:51.688 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph 2026-03-09T14:59:51.718 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /etc/ceph 2026-03-09T14:59:51.748 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph 2026-03-09T14:59:51.784 
DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 777 /etc/ceph 2026-03-09T14:59:51.818 INFO:tasks.cephadm:Writing seed config... 2026-03-09T14:59:51.818 INFO:tasks.cephadm: override: [mgr] debug mgr = 20 2026-03-09T14:59:51.818 INFO:tasks.cephadm: override: [mgr] debug ms = 1 2026-03-09T14:59:51.818 INFO:tasks.cephadm: override: [mon] debug mon = 20 2026-03-09T14:59:51.818 INFO:tasks.cephadm: override: [mon] debug ms = 1 2026-03-09T14:59:51.819 INFO:tasks.cephadm: override: [mon] debug paxos = 20 2026-03-09T14:59:51.819 INFO:tasks.cephadm: override: [osd] debug ms = 1 2026-03-09T14:59:51.819 INFO:tasks.cephadm: override: [osd] debug osd = 20 2026-03-09T14:59:51.819 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000 2026-03-09T14:59:51.819 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T14:59:51.819 DEBUG:teuthology.orchestra.run.vm03:> dd of=/home/ubuntu/cephtest/seed.ceph.conf 2026-03-09T14:59:51.843 DEBUG:tasks.cephadm:Final config: [global] # make logging friendly to teuthology log_to_file = true log_to_stderr = false log to journald = false mon cluster log to file = true mon cluster log file level = debug mon clock drift allowed = 1.000 # replicate across OSDs, not hosts osd crush chooseleaf type = 0 #osd pool default size = 2 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd # enable some debugging auth debug = true ms die on old message = true ms die on bug = true debug asserts on shutdown = true # adjust warnings mon max pg per osd = 10000# >= luminous mon pg warn max object skew = 0 mon osd allow primary affinity = true mon osd allow pg remap = true mon warn on legacy crush tunables = false mon warn on crush straw calc version zero = false mon warn on no sortbitwise = false mon warn on osd down out interval zero = false mon warn on too few osds = false mon_warn_on_pool_pg_num_not_power_of_two = false # disable pg_autoscaler by default for new pools osd_pool_default_pg_autoscale_mode = off # tests delete pools mon allow pool delete = true fsid = 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd [osd] osd scrub load threshold = 5.0 osd scrub max interval = 600 osd mclock profile = high_recovery_ops osd recover clone overlap = true osd recovery max chunk = 1048576 osd deep scrub update digest min age = 30 osd map max advance = 10 osd memory target autotune = true # debugging osd debug shutdown = true osd debug op order = true osd debug verify stray on activate = true osd debug pg log writeout = true osd debug verify cached snaps = true osd debug verify missing on start = true osd debug misdirected ops = true osd op queue = debug_random osd op queue cut off = debug_random osd shutdown pgref assert = true bdev debug aio = true osd sloppy crc = true debug ms = 1 debug osd = 20 osd mclock iops capacity threshold hdd = 49000 [mgr] mon reweight min pgs per osd = 4 mon reweight min bytes per osd = 10 mgr/telemetry/nag = false debug mgr = 20 debug ms = 1 [mon] mon data avail warn = 5 mon mgr mkfs grace = 240 mon reweight min pgs per osd = 4 mon osd reporter subtree level = osd mon osd prime pg temp = true mon reweight min bytes per osd = 10 # rotate auth tickets quickly to exercise renewal paths auth mon ticket ttl = 660# 11m auth service ticket ttl = 240# 4m # don't complain about global id reclaim mon_warn_on_insecure_global_id_reclaim = false mon_warn_on_insecure_global_id_reclaim_allowed = false debug mon = 20 debug ms = 1 debug paxos = 20 [client.rgw] rgw cache enabled = true rgw enable ops log = 
true rgw enable usage log = true 2026-03-09T14:59:51.843 DEBUG:teuthology.orchestra.run.vm03:mon.a> sudo journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a.service 2026-03-09T14:59:51.885 DEBUG:teuthology.orchestra.run.vm03:mgr.x> sudo journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service 2026-03-09T14:59:51.928 INFO:tasks.cephadm:Bootstrapping... 2026-03-09T14:59:51.928 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df -v bootstrap --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id x --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.103 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-09T14:59:52.078 INFO:teuthology.orchestra.run.vm03.stdout:-------------------------------------------------------------------------------- 2026-03-09T14:59:52.078 INFO:teuthology.orchestra.run.vm03.stdout:cephadm ['--image', 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df', '-v', 'bootstrap', '--fsid', '6884f6b8-1bc8-11f1-a1b7-432e3f447ddd', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'x', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.103', '--skip-admin-label'] 2026-03-09T14:59:52.079 INFO:teuthology.orchestra.run.vm03.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-03-09T14:59:52.079 INFO:teuthology.orchestra.run.vm03.stdout:Verifying podman|docker is present... 2026-03-09T14:59:52.099 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0 2026-03-09T14:59:52.099 INFO:teuthology.orchestra.run.vm03.stdout:Verifying lvm2 is present... 2026-03-09T14:59:52.099 INFO:teuthology.orchestra.run.vm03.stdout:Verifying time synchronization is in place... 2026-03-09T14:59:52.108 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-09T14:59:52.108 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T14:59:52.114 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-09T14:59:52.115 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive 2026-03-09T14:59:52.121 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled 2026-03-09T14:59:52.128 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active 2026-03-09T14:59:52.128 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running 2026-03-09T14:59:52.128 INFO:teuthology.orchestra.run.vm03.stdout:Repeating the final host check... 
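The cephadm binary driving this bootstrap was located a few lines earlier by asking shaman for a ready centos/9 x86_64 build of the target sha1 and then fetching the standalone file from the matching chacra host; a sketch of that lookup and the size sanity check (using requests, purely illustrative, field names taken from the builder_project result printed above) is:

    import os
    import requests

    SHA1 = "e911bdebe5c8faa3800735d1568fcdca65db60df"
    params = {
        "status": "ready", "project": "ceph", "flavor": "default",
        "distros": "centos/9/x86_64", "sha1": SHA1,
    }
    build = requests.get("https://shaman.ceph.com/api/search",
                         params=params, timeout=30).json()[0]
    chacra_host = build["chacra_url"].split("/")[2]   # e.g. 3.chacra.ceph.com
    url = (f"https://{chacra_host}/binaries/ceph/{build['ref']}/{SHA1}/"
           f"centos/9/x86_64/flavors/default/cephadm")

    dest = "/home/ubuntu/cephtest/cephadm"
    with open(dest, "wb") as f:
        f.write(requests.get(url, timeout=300).content)
    # The log requires the downloaded file to be non-trivially sized
    # (> 1000 bytes) before it is marked executable.
    assert os.path.getsize(dest) > 1000, "truncated cephadm download"
    os.chmod(dest, 0o755)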
2026-03-09T14:59:52.151 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0 2026-03-09T14:59:52.151 INFO:teuthology.orchestra.run.vm03.stdout:podman (/bin/podman) version 5.8.0 is present 2026-03-09T14:59:52.151 INFO:teuthology.orchestra.run.vm03.stdout:systemctl is present 2026-03-09T14:59:52.151 INFO:teuthology.orchestra.run.vm03.stdout:lvcreate is present 2026-03-09T14:59:52.158 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-09T14:59:52.158 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T14:59:52.164 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-09T14:59:52.164 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive 2026-03-09T14:59:52.172 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled 2026-03-09T14:59:52.178 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active 2026-03-09T14:59:52.178 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running 2026-03-09T14:59:52.178 INFO:teuthology.orchestra.run.vm03.stdout:Host looks OK 2026-03-09T14:59:52.178 INFO:teuthology.orchestra.run.vm03.stdout:Cluster fsid: 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:52.179 INFO:teuthology.orchestra.run.vm03.stdout:Acquiring lock 140587508221648 on /run/cephadm/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.lock 2026-03-09T14:59:52.179 INFO:teuthology.orchestra.run.vm03.stdout:Lock 140587508221648 acquired on /run/cephadm/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.lock 2026-03-09T14:59:52.179 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 3300 ... 2026-03-09T14:59:52.179 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 6789 ... 
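The preflight above probes more than one candidate time-sync unit name, which is why the chrony.service failures are harmless: the check only needs one unit to be both enabled and active (chronyd.service here). A minimal version of that probe, with a hypothetical candidate list chosen for illustration, looks like:

    import subprocess

    def unit_state(name, verb):
        # systemctl exits non-zero for inactive or unknown units; the state
        # string on stdout is still useful either way.
        return subprocess.run(["systemctl", verb, name],
                              capture_output=True, text=True).stdout.strip()

    # The candidate list is an assumption; the log only shows chrony.service
    # being tried and chronyd.service succeeding.
    for unit in ("chrony.service", "chronyd.service", "systemd-timesyncd.service"):
        if unit_state(unit, "is-enabled") == "enabled" and \
           unit_state(unit, "is-active") == "active":
            print(f"Unit {unit} is enabled and running")
            break
    else:
        raise RuntimeError("no time-sync service is enabled and running")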
2026-03-09T14:59:52.179 INFO:teuthology.orchestra.run.vm03.stdout:Base mon IP(s) is [192.168.123.103:3300, 192.168.123.103:6789], mon addrv is [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-09T14:59:52.183 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100 2026-03-09T14:59:52.183 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100 2026-03-09T14:59:52.186 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-03-09T14:59:52.186 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:3/64 scope link noprefixroute 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24` 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24` 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-03-09T14:59:52.190 INFO:teuthology.orchestra.run.vm03.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-09T14:59:52.191 INFO:teuthology.orchestra.run.vm03.stdout:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Trying to pull quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df... 
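The two "Mon IP ... is in CIDR network ..." lines above come from matching the chosen mon IP against the locally routed subnets reported by ip route, which is how public_network gets inferred; the same check in plain Python:

    import ipaddress

    mon_ip = ipaddress.ip_address("192.168.123.103")
    # Subnets as reported by "ip route" / "ip -6 route" in the log above.
    local_networks = [ipaddress.ip_network("192.168.123.0/24"),
                      ipaddress.ip_network("fe80::/64")]
    public_nets = [str(net) for net in local_networks
                   if mon_ip.version == net.version and mon_ip in net]
    print("Inferred mon public CIDR:", public_nets)   # ['192.168.123.0/24']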
2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Getting image source signatures 2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:1752b8d01aa0dd33bbe0ab24e8316174c94fbdcd5d26252e2680bba0624747a7 2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:8e380faede39ebd4286247457b408d979ab568aafd8389c42ec304b8cfba4e92 2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying config sha256:654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 2026-03-09T14:59:53.507 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-09T14:59:54.068 INFO:teuthology.orchestra.run.vm03.stdout:ceph: stdout ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-09T14:59:54.068 INFO:teuthology.orchestra.run.vm03.stdout:Ceph version: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable) 2026-03-09T14:59:54.068 INFO:teuthology.orchestra.run.vm03.stdout:Extracting ceph user uid/gid from container image... 2026-03-09T14:59:54.539 INFO:teuthology.orchestra.run.vm03.stdout:stat: stdout 167 167 2026-03-09T14:59:54.540 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial keys... 2026-03-09T14:59:54.762 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQBq4K5pJIEDJhAAIAAw0eVirUyZ7ObRP2zzOw== 2026-03-09T14:59:55.018 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQBq4K5p5mjsMxAA0iGDHUBvs/awTerkNRq7ig== 2026-03-09T14:59:55.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQBr4K5pY/pFBhAACCe1WWJiwLj81F9FjIM33g== 2026-03-09T14:59:55.422 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial monmap... 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:monmaptool for a [v2:192.168.123.103:3300,v1:192.168.123.103:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:setting min_mon_release = quincy 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: set fsid to 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T14:59:55.699 INFO:teuthology.orchestra.run.vm03.stdout:Creating mon... 2026-03-09T14:59:55.970 INFO:teuthology.orchestra.run.vm03.stdout:create mon.a on 2026-03-09T14:59:56.268 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
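The initial monmap above is produced inside the ceph container with monmaptool, seeding a single monitor "a" at the v2/v1 address pair and the cluster fsid; a rough equivalent invocation (flags hedged from the monmaptool output shown, not copied from cephadm itself, and normally run inside the ceph container rather than on the host) is:

    import subprocess

    fsid = "6884f6b8-1bc8-11f1-a1b7-432e3f447ddd"
    addrv = "[v2:192.168.123.103:3300,v1:192.168.123.103:6789]"
    # Writes epoch 0 with one monitor, as the log shows; the log also shows
    # min_mon_release being set to quincy as part of the same step.
    subprocess.run(
        ["monmaptool", "--create", "--clobber",
         "--fsid", fsid,
         "--addv", "a", addrv,
         "/tmp/monmap"],
        check=True,
    )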
2026-03-09T14:59:56.406 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.target → /etc/systemd/system/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.target. 2026-03-09T14:59:56.406 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.target → /etc/systemd/system/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.target. 2026-03-09T14:59:56.564 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a 2026-03-09T14:59:56.564 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a.service: Unit ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a.service not loaded. 2026-03-09T14:59:56.703 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.target.wants/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a.service → /etc/systemd/system/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@.service. 2026-03-09T14:59:56.893 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T14:59:56.893 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available 2026-03-09T14:59:56.893 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon to start... 2026-03-09T14:59:56.893 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon... 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout cluster: 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout id: 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout services: 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum a (age 0.159921s) 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-09T14:59:57.236 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout data: 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pgs: 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:mon is available 2026-03-09T14:59:57.237 INFO:teuthology.orchestra.run.vm03.stdout:Assimilating anything we can from ceph.conf... 
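The "Waiting for mon..." step above simply polls ceph status through the freshly created admin keyring until the monitor answers; a compact polling sketch (illustrative, not the cephadm code) is:

    import json
    import subprocess
    import time

    def mon_available(timeout=60, interval=2):
        # Poll "ceph status" until the mon responds, then return the parsed
        # status document.
        deadline = time.time() + timeout
        while time.time() < deadline:
            proc = subprocess.run(
                ["ceph", "status", "--format", "json"],
                capture_output=True, text=True,
            )
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            time.sleep(interval)
        raise TimeoutError("mon did not become available")

    status = mon_available()
    print("quorum:", status["quorum_names"])   # ['a'] once mon.a is up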
2026-03-09T14:59:57.705 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:57.705 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-09T14:59:57.705 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:57.705 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-09T14:59:57.705 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-09T14:59:57.706 INFO:teuthology.orchestra.run.vm03.stdout:Generating new minimal ceph.conf... 2026-03-09T14:59:58.023 INFO:teuthology.orchestra.run.vm03.stdout:Restarting the monitor... 
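Once the monitor is up, the remaining seed options are assimilated into the cluster configuration database and a minimal ceph.conf is regenerated before the monitor is restarted (the journalctl stream below shows that restart). Outside of cephadm the same two steps would be roughly:

    import subprocess

    # Push the leftover seed options into the mon config store, then emit the
    # minimal client config that replaces the verbose bootstrap file.
    subprocess.run(["ceph", "config", "assimilate-conf",
                    "-i", "/etc/ceph/ceph.conf"], check=True)
    minimal = subprocess.run(["ceph", "config", "generate-minimal-conf"],
                             check=True, capture_output=True, text=True).stdout
    with open("/etc/ceph/ceph.conf", "w") as f:
        f.write(minimal)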
2026-03-09T14:59:58.313 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a[50854]: 2026-03-09T14:59:58.117+0000 7f0533e1a640 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-09T14:59:58.580 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 podman[51149]: 2026-03-09 14:59:58.313141732 +0000 UTC m=+0.212570158 container died 91baa2c37ced91ff53d580569e62ce0c08a91b6166d37f00534640cd95e8e6f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T14:59:58.580 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 podman[51149]: 2026-03-09 14:59:58.431468717 +0000 UTC m=+0.330897143 container remove 91baa2c37ced91ff53d580569e62ce0c08a91b6166d37f00534640cd95e8e6f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T14:59:58.580 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 bash[51149]: ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a 2026-03-09T14:59:58.580 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 systemd[1]: ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a.service: Deactivated successfully. 2026-03-09T14:59:58.580 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 systemd[1]: Stopped Ceph mon.a for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd. 2026-03-09T14:59:58.580 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 systemd[1]: Starting Ceph mon.a for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd... 
2026-03-09T14:59:58.632 INFO:teuthology.orchestra.run.vm03.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-09T14:59:58.835 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 podman[51228]: 2026-03-09 14:59:58.580171992 +0000 UTC m=+0.021338605 container create 1cb95ecfe67db391dd4eb859d15437cb1101e8f22b0674a5d83a9dbaa2f8be5d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T14:59:58.835 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 podman[51228]: 2026-03-09 14:59:58.616795218 +0000 UTC m=+0.057961842 container init 1cb95ecfe67db391dd4eb859d15437cb1101e8f22b0674a5d83a9dbaa2f8be5d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3) 2026-03-09T14:59:58.835 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 podman[51228]: 2026-03-09 14:59:58.622717141 +0000 UTC m=+0.063883754 container start 1cb95ecfe67db391dd4eb859d15437cb1101e8f22b0674a5d83a9dbaa2f8be5d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T14:59:58.835 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 bash[51228]: 1cb95ecfe67db391dd4eb859d15437cb1101e8f22b0674a5d83a9dbaa2f8be5d 2026-03-09T14:59:58.835 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 podman[51228]: 2026-03-09 14:59:58.57197149 +0000 UTC m=+0.013138113 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 systemd[1]: Started Ceph mon.a for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd. 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 7 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: pidfile_write: ignore empty --pid-file 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: load: jerasure load: lrc 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: RocksDB version: 7.9.2 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Git sha 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: DB SUMMARY 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: DB Session ID: XO396HUWLF7VRG7EX0GY 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: CURRENT file: CURRENT 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000008.sst 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000009.log size: 87471 ; 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.error_if_exists: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.create_if_missing: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.paranoid_checks: 1 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.env: 0x556fc4acadc0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 
ceph-mon[51263]: rocksdb: Options.info_log: 0x556fc5d81880 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.statistics: (nil) 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.use_fsync: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_log_file_size: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.allow_fallocate: 1 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.use_direct_reads: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T14:59:58.836 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.db_log_dir: 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.wal_dir: 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 
vm03 ceph-mon[51263]: rocksdb: Options.write_buffer_manager: 0x556fc5d85900 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.unordered_write: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.row_cache: None 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.wal_filter: None 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.two_write_queues: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.wal_compression: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.atomic_flush: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.write_dbid_to_manifest: 0 
2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.log_readahead_size: 0 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T14:59:58.837 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_background_jobs: 2 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_background_compactions: -1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_subcompactions: 1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_open_files: -1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T14:59:58.838 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_background_flushes: -1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Compression algorithms supported: 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kZSTD supported: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kXpressCompression supported: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kBZip2Compression supported: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kLZ4Compression supported: 1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kZlibCompression supported: 1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: kSnappyCompression supported: 1 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000010 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.merge_operator: 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_filter: None 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T14:59:58.838 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x556fc5d80480) 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: cache_index_and_filter_blocks: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: cache_index_and_filter_blocks_with_high_priority: 0 
2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: pin_top_level_index_and_filter: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: index_type: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: data_block_index_type: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: index_shortening: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: checksum: 4 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: no_block_cache: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache: 0x556fc5da5350 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache_name: BinnedLRUCache 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache_options: 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: capacity : 536870912 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: num_shard_bits : 4 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: strict_capacity_limit : 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: high_pri_pool_ratio: 0.000 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_cache_compressed: (nil) 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: persistent_cache: (nil) 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_size: 4096 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_size_deviation: 10 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_restart_interval: 16 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: index_block_restart_interval: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: metadata_block_size: 4096 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: partition_filters: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: use_delta_encoding: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: filter_policy: bloomfilter 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: whole_key_filtering: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: verify_compression: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: read_amp_bytes_per_bit: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: format_version: 5 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: enable_index_compression: 1 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: block_align: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: max_auto_readahead_size: 262144 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: prepopulate_block_cache: 0 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: initial_auto_readahead_size: 8192 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: 
Options.compression: NoCompression 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T14:59:58.839 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.num_levels: 7 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T14:59:58.840 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: 
Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T14:59:58.840 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.inplace_update_support: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.bloom_locality: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.max_successive_merges: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T14:59:58.841 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.ttl: 2592000 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.enable_blob_files: false 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.min_blob_size: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: c81fe8f2-afff-4143-b828-b6d3c2da9a57 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773068398649841, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 
14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773068398651368, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 84450, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 244, "table_properties": {"data_size": 82595, "index_size": 244, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 10018, "raw_average_key_size": 47, "raw_value_size": 76903, "raw_average_value_size": 364, "num_data_blocks": 11, "num_entries": 211, "num_filter_entries": 211, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773068398, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "c81fe8f2-afff-4143-b828-b6d3c2da9a57", "db_session_id": "XO396HUWLF7VRG7EX0GY", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773068398651417, "job": 1, "event": "recovery_finished"} 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-03-09T14:59:58.841 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x556fc5da6e00 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: DB pointer 0x556fc5eb0000 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: ** DB Stats ** 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative stall: 
00:00:0.000 H:M:S, 0.0 percent 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: ** Compaction Stats [default] ** 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: L0 2/0 84.33 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.2 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Sum 2/0 84.33 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.2 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 60.2 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: ** Compaction Stats [default] ** 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 60.2 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Cumulative compaction: 0.00 GB write, 15.44 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Interval 
compaction: 0.00 GB write, 15.44 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Block cache BinnedLRUCache@0x556fc5da5350#7 capacity: 512.00 MB usage: 6.23 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.6e-05 secs_since: 0 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: Block cache entry stats(count,size,portion): DataBlock(2,5.03 KB,0.000959635%) FilterBlock(2,0.77 KB,0.000146031%) IndexBlock(2,0.44 KB,8.34465e-05%) Misc(1,0.00 KB,0%) 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: starting mon.a rank 0 at public addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] at bind addrs [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???) e1 preinit fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:58.842 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).mds e1 new map 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).mds e1 print_map 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: e1 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: btime 2026-03-09T14:59:56:920328+0000 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: legacy client fscid: -1 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout: No filesystems configured 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T14:59:58.843 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-09T14:59:58.843 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:58 vm03 ceph-mon[51263]: mon.a@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-09T14:59:58.976 INFO:teuthology.orchestra.run.vm03.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-09T14:59:58.976 INFO:teuthology.orchestra.run.vm03.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-09T14:59:58.977 INFO:teuthology.orchestra.run.vm03.stdout:Creating mgr... 2026-03-09T14:59:58.977 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-09T14:59:58.977 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8765 ... 2026-03-09T14:59:59.142 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x 2026-03-09T14:59:59.142 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service: Unit ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service not loaded. 2026-03-09T14:59:59.272 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd.target.wants/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service → /etc/systemd/system/ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@.service. 2026-03-09T14:59:59.404 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 systemd[1]: Starting Ceph mgr.x for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd... 2026-03-09T14:59:59.562 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T14:59:59.562 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available 2026-03-09T14:59:59.562 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T14:59:59.562 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[9283, 8765]>. firewalld.service is not available 2026-03-09T14:59:59.562 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr to start... 2026-03-09T14:59:59.562 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr... 
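The bootstrap output above first reports a non-zero exit from systemctl reset-failed for ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service; the stderr ("Unit ... not loaded.") simply means the unit has never existed on vm03 yet, and cephadm continues on to create and start it. A minimal Python sketch of that tolerant reset, using only the standard library and the unit name taken from the log (an illustration of the pattern, not cephadm's actual implementation):

    import subprocess

    def reset_failed(unit: str) -> None:
        # Try to clear any previous failed state for the unit.
        res = subprocess.run(
            ["systemctl", "reset-failed", unit],
            capture_output=True, text=True,
        )
        # "Unit ... not loaded." means the daemon has never been deployed
        # on this host; treat that as a no-op rather than an error.
        if res.returncode != 0 and "not loaded" in res.stderr:
            return
        res.check_returncode()

    # Unit name as it appears in the log above (hypothetical usage).
    reset_failed("ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service")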
2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 podman[51485]: 2026-03-09 14:59:59.403867964 +0000 UTC m=+0.023809407 container create c2d75d3874e5a65b1be100c56412819d71d93c05fa46fef4b738dd18a819c0f4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 podman[51485]: 2026-03-09 14:59:59.390524827 +0000 UTC m=+0.010466270 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 podman[51485]: 2026-03-09 14:59:59.53377208 +0000 UTC m=+0.153713514 container init c2d75d3874e5a65b1be100c56412819d71d93c05fa46fef4b738dd18a819c0f4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 podman[51485]: 2026-03-09 14:59:59.539254003 +0000 UTC m=+0.159195446 container start c2d75d3874e5a65b1be100c56412819d71d93c05fa46fef4b738dd18a819c0f4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 bash[51485]: c2d75d3874e5a65b1be100c56412819d71d93c05fa46fef4b738dd18a819c0f4 2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 systemd[1]: Started 
Ceph mgr.x for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd. 2026-03-09T14:59:59.664 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T14:59:59.651+0000 7f129ceda140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 14:59:59 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T14:59:59.703+0000 7f129ceda140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: monmap epoch 1 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: last_changed 2026-03-09T14:59:55.544874+0000 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: created 2026-03-09T14:59:55.544874+0000 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: min_mon_release 19 (squid) 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: election_strategy: 1 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.a 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: fsmap 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: osdmap e1: 0 total, 0 up, 0 in 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: mgrmap e1: no daemons active 2026-03-09T14:59:59.977 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 14:59:59 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/1929949344' entity='client.admin' 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "6884f6b8-1bc8-11f1-a1b7-432e3f447ddd", 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a" 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 1, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T14:59:59.993 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T14:59:59.994 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T14:59:56:920328+0000", 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T14:59:56.921730+0000", 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T14:59:59.994 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (1/15)... 
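At this point the bootstrap is in its wait loop: the mon log shows repeated "status" commands being dispatched, the returned JSON still has "available": false under "mgrmap", and the tool prints "mgr not available, waiting (1/15)..." before retrying. A rough stand-alone sketch of such a poll, assuming a working ceph CLI and admin keyring on the host (illustrative only, not the code cephadm actually runs):

    import json
    import subprocess
    import time

    def mgr_available() -> bool:
        # Ask the cluster for its status as JSON and check the mgrmap,
        # the same field visible in the dumps above.
        out = subprocess.run(
            ["ceph", "status", "--format", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        return json.loads(out)["mgrmap"].get("available", False)

    # The log shows up to 15 attempts; the 1-second pause is an assumption.
    for attempt in range(1, 16):
        if mgr_available():
            print("mgr is available")
            break
        print(f"mgr not available, waiting ({attempt}/15)...")
        time.sleep(1)
    else:
        raise SystemExit("mgr never became available")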
2026-03-09T15:00:00.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:00.188+0000 7f129ceda140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:00.544+0000 7f129ceda140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: from numpy import show_config as show_numpy_config 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:00.636+0000 7f129ceda140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:00.676+0000 7f129ceda140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:00:00.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:00 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:00.754+0000 7f129ceda140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:00:01.559 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.302+0000 7f129ceda140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:00:01.559 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.429+0000 7f129ceda140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:00:01.559 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.477+0000 7f129ceda140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:00:01.559 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.515+0000 7f129ceda140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:00:01.559 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:01 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/1773384367' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:00:01.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.558+0000 7f129ceda140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:00:01.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.596+0000 7f129ceda140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:00:01.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.783+0000 7f129ceda140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:00:02.103 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:01 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:01.838+0000 7f129ceda140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:00:02.103 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.102+0000 7f129ceda140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:00:02.414 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.411+0000 7f129ceda140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:00:02.414 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:02 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3798877065' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:00:02.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T15:00:02.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T15:00:02.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "6884f6b8-1bc8-11f1-a1b7-432e3f447ddd", 2026-03-09T15:00:02.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T15:00:02.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a" 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 3, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T15:00:02.463 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T14:59:56:920328+0000", 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T15:00:02.463 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T14:59:56.921730+0000", 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T15:00:02.463 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T15:00:02.464 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (2/15)... 2026-03-09T15:00:02.722 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.456+0000 7f129ceda140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:00:02.723 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.504+0000 7f129ceda140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:00:02.723 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.583+0000 7f129ceda140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:00:02.723 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.626+0000 7f129ceda140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:00:03.003 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.721+0000 7f129ceda140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:00:03.003 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:02 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:02.846+0000 7f129ceda140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:00:03.331 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:03 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:03.002+0000 7f129ceda140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:00:03.331 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:03 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:03.042+0000 7f129ceda140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:00:03.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: Activating manager daemon x 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: mgrmap e2: x(active, starting, since 0.00502303s) 2026-03-09T15:00:03.584 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: Manager daemon x is now available 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' 2026-03-09T15:00:03.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:03 vm03 ceph-mon[51263]: from='mgr.14100 192.168.123.103:0/1460661365' entity='mgr.x' 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "6884f6b8-1bc8-11f1-a1b7-432e3f447ddd", 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": { 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T15:00:04.994 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-09T15:00:04.994 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "a" 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 6, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-09T14:59:56:920328+0000", 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: 
stdout "available": true, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-09T14:59:56.921730+0000", 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T15:00:04.995 INFO:teuthology.orchestra.run.vm03.stdout:mgr is available 2026-03-09T15:00:05.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:05 vm03 ceph-mon[51263]: mgrmap e3: x(active, since 1.01254s) 2026-03-09T15:00:05.281 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:05 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/1477285790' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-09T15:00:05.405 INFO:teuthology.orchestra.run.vm03.stdout:Enabling cephadm module... 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:06 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: ignoring --setuser ceph since I am not root 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:06 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: ignoring --setgroup ceph since I am not root 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:06 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:06.495+0000 7f3ae6c25140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:06 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:06.547+0000 7f3ae6c25140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:06 vm03 ceph-mon[51263]: mgrmap e4: x(active, since 2s) 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:06 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/745329113' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-09T15:00:06.604 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:06 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/568759943' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "x", 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-09T15:00:06.958 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 5... 2026-03-09T15:00:07.220 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:06 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:06.984+0000 7f3ae6c25140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:07.349+0000 7f3ae6c25140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: from numpy import show_config as show_numpy_config 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:07.441+0000 7f3ae6c25140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:07 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/568759943' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:07 vm03 ceph-mon[51263]: mgrmap e5: x(active, since 3s) 2026-03-09T15:00:07.484 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:07 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/3014601652' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T15:00:07.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:07.483+0000 7f3ae6c25140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:00:07.833 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:07 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:07.558+0000 7f3ae6c25140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:00:08.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.081+0000 7f3ae6c25140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:00:08.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.199+0000 7f3ae6c25140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:00:08.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.243+0000 7f3ae6c25140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:00:08.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.284+0000 7f3ae6c25140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:00:08.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.328+0000 7f3ae6c25140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:00:08.822 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.367+0000 7f3ae6c25140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:00:08.822 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.544+0000 7f3ae6c25140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:00:08.822 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.595+0000 7f3ae6c25140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:00:09.083 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:08 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:08.821+0000 7f3ae6c25140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:00:09.409 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.123+0000 7f3ae6c25140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:00:09.409 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.164+0000 7f3ae6c25140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:00:09.409 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.208+0000 7f3ae6c25140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:00:09.409 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 
vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.291+0000 7f3ae6c25140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:00:09.409 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.329+0000 7f3ae6c25140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:00:09.679 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.408+0000 7f3ae6c25140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:00:09.679 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.529+0000 7f3ae6c25140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.678+0000 7f3ae6c25140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:09.719+0000 7f3ae6c25140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: Active manager daemon x restarted 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: Activating manager daemon x 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: osdmap e2: 0 total, 0 up, 0 in 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: mgrmap e6: x(active, starting, since 0.0321234s) 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: Manager daemon x is now available 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 
192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:00:10.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:09 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:00:10.922 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T15:00:10.922 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-09T15:00:10.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-09T15:00:10.923 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T15:00:10.923 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 5 is available 2026-03-09T15:00:10.923 INFO:teuthology.orchestra.run.vm03.stdout:Setting orchestrator backend to cephadm... 2026-03-09T15:00:11.027 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:10 vm03 ceph-mon[51263]: Found migration_current of "None". Setting to last migration. 2026-03-09T15:00:11.028 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:10 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:11.028 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:10 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:11.028 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:10 vm03 ceph-mon[51263]: mgrmap e7: x(active, since 1.03632s) 2026-03-09T15:00:11.978 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-09T15:00:11.978 INFO:teuthology.orchestra.run.vm03.stdout:Generating ssh key... 
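(Annotation: the bootstrap steps recorded just above map onto a short CLI sequence. Roughly, and only as a sketch of what the log shows, the teuthology helper is driving:)

    # sketch of the steps logged above, in the order they appear
    ceph mgr module enable cephadm      # "Enabling cephadm module..." followed by a mgr restart
    ceph orch set backend cephadm       # "Setting orchestrator backend to cephadm..."
    ceph cephadm generate-key           # "Generating ssh key..." (cluster ssh key for cephadm)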
2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:11] ENGINE Bus STARTING 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:11] ENGINE Serving on https://192.168.123.103:7150 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:11] ENGINE Client ('192.168.123.103', 41462) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:00:12.233 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:12 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: Generating public/private ed25519 key pair. 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: Your identification has been saved in /tmp/tmpkrfr2rwz/key 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: Your public key has been saved in /tmp/tmpkrfr2rwz/key.pub 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: The key fingerprint is: 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: SHA256:EnTXfZaqDkelrGf+lGzHw8YL1eH5dtAuk4AQxIrt0xs ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: The key's randomart image is: 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: +--[ED25519 256]--+ 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | .o+ .. . .| 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | . 
..o ....o| 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | o... . o .+ | 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | . o. . = ...+| 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | ...S + o .+o| 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | o.Eo =..*+.| 2026-03-09T15:00:12.492 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | . oB *+B=| 2026-03-09T15:00:12.493 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | . oo +++| 2026-03-09T15:00:12.493 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: | .. . | 2026-03-09T15:00:12.493 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:12 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: +----[SHA256]-----+ 2026-03-09T15:00:12.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP+mlNl9Tvc0VGf2xyUePQdqVHr62/px6Vjx7BJjFjcl ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:12.854 INFO:teuthology.orchestra.run.vm03.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-09T15:00:12.854 INFO:teuthology.orchestra.run.vm03.stdout:Adding key to root@localhost authorized_keys... 2026-03-09T15:00:12.854 INFO:teuthology.orchestra.run.vm03.stdout:Adding host vm03... 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:11] ENGINE Serving on http://192.168.123.103:8765 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:11] ENGINE Bus STARTED 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: mgrmap e8: x(active, since 2s) 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: Generating ssh key... 
2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:13.427 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:13 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:14.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:14 vm03 ceph-mon[51263]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:14.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:14 vm03 ceph-mon[51263]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "addr": "192.168.123.103", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:14.905 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Added host 'vm03' with addr '192.168.123.103' 2026-03-09T15:00:14.905 INFO:teuthology.orchestra.run.vm03.stdout:Deploying unmanaged mon service... 2026-03-09T15:00:15.443 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-09T15:00:15.444 INFO:teuthology.orchestra.run.vm03.stdout:Deploying unmanaged mgr service... 2026-03-09T15:00:15.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:15 vm03 ceph-mon[51263]: Deploying cephadm binary to vm03 2026-03-09T15:00:15.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:15 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:15.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:15 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:15.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:15 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:15.836 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-03-09T15:00:16.362 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:16 vm03 ceph-mon[51263]: Added host vm03 2026-03-09T15:00:16.362 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:16 vm03 ceph-mon[51263]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:16.362 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:16 vm03 ceph-mon[51263]: Saving service mon spec with placement count:5 2026-03-09T15:00:16.362 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:16 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:16.362 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:16 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3802281477' entity='client.admin' 2026-03-09T15:00:16.697 INFO:teuthology.orchestra.run.vm03.stdout:Enabling the dashboard module... 
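(Annotation: the host registration and the two unmanaged service specs scheduled above reduce to a few orchestrator calls. A rough equivalent of what this run records, with the hostname and address taken from the log, is:)

    ceph orch host add vm03 192.168.123.103   # "Added host 'vm03' with addr '192.168.123.103'"
    ceph orch apply mon --unmanaged           # keep the bootstrap mon; no automatic placement yet
    ceph orch apply mgr --unmanaged           # same for the bootstrap mgr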
2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: Saving service mgr spec with placement count:2 2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/2112394432' entity='client.admin' 2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3626616614' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-09T15:00:17.792 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:17 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:18.049 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:17 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: ignoring --setuser ceph since I am not root 2026-03-09T15:00:18.049 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:17 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: ignoring --setgroup ceph since I am not root 2026-03-09T15:00:18.049 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:17 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:17.966+0000 7fe25037b140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:00:18.049 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:18.016+0000 7fe25037b140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "x", 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-09T15:00:18.445 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 9... 2026-03-09T15:00:18.563 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:18.516+0000 7fe25037b140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:00:18.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:18 vm03 ceph-mon[51263]: from='mgr.14118 192.168.123.103:0/2540499605' entity='mgr.x' 2026-03-09T15:00:18.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:18 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/3626616614' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-09T15:00:18.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:18 vm03 ceph-mon[51263]: mgrmap e9: x(active, since 8s) 2026-03-09T15:00:18.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:18 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1551861348' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:18.852+0000 7fe25037b140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: from numpy import show_config as show_numpy_config 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:18.951+0000 7fe25037b140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:18 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:18.996+0000 7fe25037b140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:00:19.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:19.080+0000 7fe25037b140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:00:19.930 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:19.640+0000 7fe25037b140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:00:19.930 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:19.763+0000 7fe25037b140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:00:19.930 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:19.807+0000 7fe25037b140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:00:19.930 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:19.844+0000 7fe25037b140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:00:19.930 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 
2026-03-09T15:00:19.889+0000 7fe25037b140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:00:19.930 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:19 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:19.928+0000 7fe25037b140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:00:20.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.114+0000 7fe25037b140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:00:20.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.171+0000 7fe25037b140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:00:20.748 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.429+0000 7fe25037b140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:00:20.749 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.747+0000 7fe25037b140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:00:21.061 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.785+0000 7fe25037b140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:00:21.061 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.835+0000 7fe25037b140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:00:21.061 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.927+0000 7fe25037b140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:00:21.061 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:20 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:20.968+0000 7fe25037b140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:00:21.061 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:21 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:21.059+0000 7fe25037b140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:00:21.333 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:21 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:21.188+0000 7fe25037b140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:00:21.597 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:21 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:21.343+0000 7fe25037b140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:00:21.597 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:00:21 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:00:21.387+0000 7fe25037b140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:00:21.597 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: Active manager daemon x restarted 2026-03-09T15:00:21.597 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: Activating manager daemon x 2026-03-09T15:00:21.597 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: osdmap e3: 0 total, 0 up, 0 in 2026-03-09T15:00:21.597 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: mgrmap e10: x(active, starting, since 0.105202s) 2026-03-09T15:00:21.597 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: Manager daemon x is now available 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:00:21.598 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:22.657 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-09T15:00:22.657 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-09T15:00:22.657 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-09T15:00:22.657 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-09T15:00:22.657 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 9 is available 2026-03-09T15:00:22.657 INFO:teuthology.orchestra.run.vm03.stdout:Generating a dashboard self-signed certificate... 2026-03-09T15:00:22.780 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:22 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:00:22.780 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:22 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:22.780 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:22 vm03 ceph-mon[51263]: mgrmap e11: x(active, since 1.1106s) 2026-03-09T15:00:23.180 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-09T15:00:23.180 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial admin user... 
2026-03-09T15:00:23.766 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$WDm42aSBWhxzhmUllbIWYuRiX15hwVu2QSIZ48gvSgcG9PdjTOsFO", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773068423, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-09T15:00:23.766 INFO:teuthology.orchestra.run.vm03.stdout:Fetching dashboard port number... 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:22] ENGINE Bus STARTING 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:22] ENGINE Serving on https://192.168.123.103:7150 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:22] ENGINE Client ('192.168.123.103', 55070) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:22] ENGINE Serving on http://192.168.123.103:8765 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: [09/Mar/2026:15:00:22] ENGINE Bus STARTED 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:23.862 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:23 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:24.190 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 8443 2026-03-09T15:00:24.190 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-09T15:00:24.190 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout:Ceph Dashboard is now available at: 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout: URL: https://vm03.local:8443/ 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout: User: admin 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout: Password: 3zypimerep 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.191 INFO:teuthology.orchestra.run.vm03.stdout:Saving cluster configuration to /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/config directory 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout:Or, if you are only running a single cluster on this host: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: ceph telemetry on 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout:For more information see: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:24.656 INFO:teuthology.orchestra.run.vm03.stdout:Bootstrap complete. 2026-03-09T15:00:24.694 INFO:tasks.cephadm:Fetching config... 2026-03-09T15:00:24.694 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:24.694 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-09T15:00:24.722 INFO:tasks.cephadm:Fetching client.admin keyring... 
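(Annotation: bootstrap is complete at this point, and the output above prints the exact shell invocation for re-entering the cluster. A minimal inspection session using those printed values — fsid and key paths are from this run, the ceph subcommands are standard — would look like:)

    sudo /home/ubuntu/cephtest/cephadm shell \
        --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
    # inside the containerized shell:
    ceph -s          # expected: HEALTH_OK, 1 mon (a), 1 mgr (x), 0 osds at this stage
    ceph orch ls     # the unmanaged mon/mgr specs saved during bootstrap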
2026-03-09T15:00:24.722 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:24.722 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-09T15:00:24.755 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:24 vm03 ceph-mon[51263]: from='client.14162 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:24.755 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:24 vm03 ceph-mon[51263]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:24.755 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:24 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:24.755 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:24 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1584133898' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-09T15:00:24.755 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:24 vm03 ceph-mon[51263]: mgrmap e12: x(active, since 2s) 2026-03-09T15:00:24.755 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:24 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3818427486' entity='client.admin' 2026-03-09T15:00:24.781 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-09T15:00:24.781 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:24.781 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/keyring of=/dev/stdout 2026-03-09T15:00:24.855 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-09T15:00:24.855 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:24.855 DEBUG:teuthology.orchestra.run.vm03:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-09T15:00:24.916 INFO:tasks.cephadm:Installing pub ssh key for root users... 
2026-03-09T15:00:24.916 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP+mlNl9Tvc0VGf2xyUePQdqVHr62/px6Vjx7BJjFjcl ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T15:00:25.011 INFO:teuthology.orchestra.run.vm03.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP+mlNl9Tvc0VGf2xyUePQdqVHr62/px6Vjx7BJjFjcl ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:25.033 DEBUG:teuthology.orchestra.run.vm04:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP+mlNl9Tvc0VGf2xyUePQdqVHr62/px6Vjx7BJjFjcl ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T15:00:25.069 INFO:teuthology.orchestra.run.vm04.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP+mlNl9Tvc0VGf2xyUePQdqVHr62/px6Vjx7BJjFjcl ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:25.082 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-09T15:00:25.287 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:25.926 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-09T15:00:25.926 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-09T15:00:26.382 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:26.800 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:26 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/1456417064' entity='client.admin' 2026-03-09T15:00:26.945 INFO:tasks.cephadm:Remote vm04 excluded from cephadm cluster by role 2026-03-09T15:00:26.945 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-09T15:00:26.945 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd crush tunables default 2026-03-09T15:00:27.157 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:27.798 INFO:teuthology.orchestra.run.vm03.stderr:adjusted tunables profile to default 2026-03-09T15:00:27.954 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:27 vm03 ceph-mon[51263]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:27.954 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:27 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:27.954 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:27 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/2516873708' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-09T15:00:27.997 INFO:tasks.cephadm:Adding mon.a on vm03 2026-03-09T15:00:27.997 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch apply mon '1;vm03:192.168.123.103=a' 2026-03-09T15:00:28.201 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:28.500 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mon update... 2026-03-09T15:00:28.664 INFO:tasks.cephadm:Waiting for 1 mons in monmap... 2026-03-09T15:00:28.664 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph mon dump -f json 2026-03-09T15:00:28.796 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/2516873708' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-09T15:00:28.796 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T15:00:29.049 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: mgrmap e13: x(active, since 6s) 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: Updating vm03:/etc/ceph/ceph.conf 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.086 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.087 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.087 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.087 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:00:29.087 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": 
"config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:00:29.087 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:28 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:29.344 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:29.344 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":1,"fsid":"6884f6b8-1bc8-11f1-a1b7-432e3f447ddd","modified":"2026-03-09T14:59:55.544874Z","created":"2026-03-09T14:59:55.544874Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T15:00:29.344 INFO:teuthology.orchestra.run.vm03.stderr:dumped monmap epoch 1 2026-03-09T15:00:29.496 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-09T15:00:29.496 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph config generate-minimal-conf 2026-03-09T15:00:29.670 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:29.921 INFO:teuthology.orchestra.run.vm03.stdout:# minimal ceph.conf for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:29.922 INFO:teuthology.orchestra.run.vm03.stdout:[global] 2026-03-09T15:00:29.922 INFO:teuthology.orchestra.run.vm03.stdout: fsid = 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:00:29.922 INFO:teuthology.orchestra.run.vm03.stdout: mon_host = [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: Updating vm03:/var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/config/ceph.conf 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: Updating vm03:/var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/config/ceph.client.admin.keyring 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "1;vm03:192.168.123.103=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: Saving service mon spec with placement vm03:192.168.123.103=a;count:1 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: Reconfiguring mon.a (unknown last config time)... 
2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: Reconfiguring daemon mon.a on vm03 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:29.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:29 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/592582994' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T15:00:30.099 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-09T15:00:30.099 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:30.099 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T15:00:30.126 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:30.126 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:00:30.193 INFO:tasks.cephadm:Adding mgr.x on vm03 2026-03-09T15:00:30.193 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch apply mgr '1;vm03=x' 2026-03-09T15:00:30.422 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:30.709 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled mgr update... 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/797273856' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:00:30.799 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:30 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:30.877 INFO:tasks.cephadm:Deploying OSDs... 2026-03-09T15:00:30.877 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:00:30.877 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T15:00:30.904 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:00:30.904 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d? 
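The non-zero exit from `dd if=/scratch_devs` above is expected: the task first tries to read a pre-provisioned list of scratch devices and, when that file is absent, falls back to globbing the virtio/SCSI disks, dropping the root device, and then verifying that each remaining candidate is a readable, unmounted block device, as the following lines show. The per-device checks amount to (device name from this run):

    ls /dev/[sv]d?                                    # enumerate candidate disks
    sudo dd if=/dev/vdb of=/dev/null count=1          # must be readable
    ! mount | grep -v devtmpfs | grep -q /dev/vdb     # must not be mounted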
2026-03-09T15:00:30.977 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda 2026-03-09T15:00:30.977 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb 2026-03-09T15:00:30.977 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc 2026-03-09T15:00:30.977 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd 2026-03-09T15:00:30.977 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde 2026-03-09T15:00:30.977 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T15:00:30.977 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T15:00:30.977 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 15:00:27.515720212 +0000 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 14:57:38.828249600 +0000 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 14:57:38.828249600 +0000 2026-03-09T15:00:31.047 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 14:53:01.264000000 +0000 2026-03-09T15:00:31.048 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T15:00:31.116 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T15:00:31.116 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T15:00:31.116 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000158888 s, 3.2 MB/s 2026-03-09T15:00:31.117 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T15:00:31.191 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 15:00:27.552720258 +0000 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 14:57:38.839249615 +0000 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 14:57:38.839249615 +0000 2026-03-09T15:00:31.254 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 14:53:01.273000000 +0000 2026-03-09T15:00:31.254 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T15:00:31.323 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T15:00:31.323 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T15:00:31.323 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000199384 s, 2.6 MB/s 2026-03-09T15:00:31.324 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T15:00:31.385 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 15:00:27.597720315 +0000 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 14:57:38.833249607 +0000 2026-03-09T15:00:31.443 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 14:57:38.833249607 +0000 2026-03-09T15:00:31.444 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 14:53:01.277000000 +0000 2026-03-09T15:00:31.444 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T15:00:31.508 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T15:00:31.508 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T15:00:31.508 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000185948 s, 2.8 MB/s 2026-03-09T15:00:31.510 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T15:00:31.567 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde 2026-03-09T15:00:31.623 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-09 15:00:27.637720365 +0000 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-09 14:57:38.844249622 +0000 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-09 14:57:38.844249622 +0000 2026-03-09T15:00:31.624 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-09 14:53:01.282000000 +0000 2026-03-09T15:00:31.624 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T15:00:31.692 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-09T15:00:31.692 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-09T15:00:31.692 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000204062 s, 2.5 MB/s 2026-03-09T15:00:31.694 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T15:00:31.754 INFO:tasks.cephadm:Deploying osd.0 on vm03 with /dev/vde... 2026-03-09T15:00:31.754 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- lvm zap /dev/vde 2026-03-09T15:00:31.971 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:32.001 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:31 vm03 ceph-mon[51263]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "1;vm03=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:32.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:31 vm03 ceph-mon[51263]: Saving service mgr spec with placement vm03=x;count:1 2026-03-09T15:00:32.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:31 vm03 ceph-mon[51263]: Reconfiguring mgr.x (unknown last config time)... 
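Each OSD is then created one device at a time: the device is wiped with ceph-volume and handed to the orchestrator, the two invocations visible above and below for /dev/vde (and later for /dev/vdd). Stripped of the --image/-c/-k/--fsid plumbing the log shows, the sequence is essentially:

    sudo cephadm ceph-volume -- lvm zap /dev/vde                   # clear any previous LVM/partition state
    sudo cephadm shell -- ceph orch daemon add osd vm03:/dev/vde   # let cephadm create osd.N on the device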
2026-03-09T15:00:32.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:31 vm03 ceph-mon[51263]: Reconfiguring daemon mgr.x on vm03 2026-03-09T15:00:32.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:31 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:32.002 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:31 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:33.187 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:33.206 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch daemon add osd vm03:/dev/vde 2026-03-09T15:00:33.397 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:34.206 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:33 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:00:34.206 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:33 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:00:34.206 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:33 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:35.105 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:34 vm03 ceph-mon[51263]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:35.105 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:34 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/96936759' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8669d207-052b-42b6-8a7e-94f5ee8d25c8"}]: dispatch 2026-03-09T15:00:35.105 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:34 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/96936759' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8669d207-052b-42b6-8a7e-94f5ee8d25c8"}]': finished 2026-03-09T15:00:35.105 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:34 vm03 ceph-mon[51263]: osdmap e5: 1 total, 0 up, 1 in 2026-03-09T15:00:35.105 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:34 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:36.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:35 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/1500739858' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:00:39.646 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:39 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:00:39.646 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:39 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:41.029 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:40 vm03 ceph-mon[51263]: Deploying daemon osd.0 on vm03 2026-03-09T15:00:42.718 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:42 vm03 ceph-mon[51263]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:00:43.834 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:43 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:43.834 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:43 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:43.834 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:43 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:44.508 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 0 on host 'vm03' 2026-03-09T15:00:44.789 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:00:44.789 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:44.789 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:44.789 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:44.790 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:00:44.790 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:44.790 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:44.790 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:44.790 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:44 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:44.790 DEBUG:teuthology.orchestra.run.vm03:osd.0> sudo journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.0.service 2026-03-09T15:00:44.795 INFO:tasks.cephadm:Deploying osd.1 on vm03 with /dev/vdd... 
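Once "Created osd(s) 0 on host 'vm03'" comes back, the task starts tailing the new daemon's systemd unit (the `journalctl -f` line above). cephadm names every unit ceph-<fsid>@<daemon>.service, so the same log can be followed by hand; for this run:

    sudo journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.0.service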
2026-03-09T15:00:44.795 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- lvm zap /dev/vdd 2026-03-09T15:00:45.132 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:45.686 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:45 vm03 ceph-mon[51263]: from='osd.0 [v2:192.168.123.103:6802/1036806564,v1:192.168.123.103:6803/1036806564]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:00:47.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='osd.0 [v2:192.168.123.103:6802/1036806564,v1:192.168.123.103:6803/1036806564]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: osdmap e6: 1 total, 0 up, 1 in 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='osd.0 [v2:192.168.123.103:6802/1036806564,v1:192.168.123.103:6803/1036806564]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:00:47.084 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:46 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:47.163 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:47.190 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch daemon add osd vm03:/dev/vdd 2026-03-09T15:00:47.393 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config 
/var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:47.801 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: Detected new or changed devices on vm03 2026-03-09T15:00:47.801 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: Adjusting osd_memory_target on vm03 to 257.0M 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: Unable to set osd_memory_target on vm03 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='osd.0 [v2:192.168.123.103:6802/1036806564,v1:192.168.123.103:6803/1036806564]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: osdmap e7: 1 total, 0 up, 1 in 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:48.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:47 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:48.863 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:48 vm03 ceph-mon[51263]: purged_snaps scrub starts 2026-03-09T15:00:48.863 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:48 vm03 ceph-mon[51263]: purged_snaps scrub ok 2026-03-09T15:00:48.863 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:48 vm03 ceph-mon[51263]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:00:48.864 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:48 vm03 ceph-mon[51263]: from='client.14193 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm03:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:00:48.864 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:48 vm03 ceph-mon[51263]: from='osd.0 [v2:192.168.123.103:6802/1036806564,v1:192.168.123.103:6803/1036806564]' entity='osd.0' 2026-03-09T15:00:48.864 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:48 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:48.864 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:00:48 vm03 
ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0[63825]: 2026-03-09T15:00:48.639+0000 7ff1dcbc6640 -1 osd.0 0 waiting for initial osdmap 2026-03-09T15:00:48.864 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:00:48 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0[63825]: 2026-03-09T15:00:48.648+0000 7ff1d79dc640 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1565852507' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b11c233a-dff6-4d35-97c7-c187f7f4116f"}]: dispatch 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1565852507' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b11c233a-dff6-4d35-97c7-c187f7f4116f"}]': finished 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: osd.0 [v2:192.168.123.103:6802/1036806564,v1:192.168.123.103:6803/1036806564] boot 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: osdmap e8: 2 total, 1 up, 2 in 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:00:50.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:49 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/3191288677' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:00:51.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:50 vm03 ceph-mon[51263]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:00:51.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:50 vm03 ceph-mon[51263]: osdmap e9: 2 total, 1 up, 2 in 2026-03-09T15:00:51.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:50 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:00:53.128 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:52 vm03 ceph-mon[51263]: pgmap v14: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:00:54.188 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:53 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:00:54.188 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:53 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:55.027 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:54 vm03 ceph-mon[51263]: pgmap v15: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:00:55.027 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:54 vm03 ceph-mon[51263]: Deploying daemon osd.1 on vm03 2026-03-09T15:00:57.186 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:56 vm03 ceph-mon[51263]: pgmap v16: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:00:57.186 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:56 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:57.186 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:56 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:57.186 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:56 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:58.401 INFO:teuthology.orchestra.run.vm03.stdout:Created osd(s) 1 on host 'vm03' 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: pgmap v17: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='osd.1 
[v2:192.168.123.103:6810/3334446627,v1:192.168.123.103:6811/3334446627]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:58.552 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:58 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:00:58.575 DEBUG:teuthology.orchestra.run.vm03:osd.1> sudo journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.1.service 2026-03-09T15:00:58.577 INFO:tasks.cephadm:Waiting for 2 OSDs to come up... 2026-03-09T15:00:58.577 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd stat -f json 2026-03-09T15:00:59.000 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:00:59.294 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:00:59.509 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":10,"num_osds":2,"num_up_osds":1,"osd_up_since":1773068448,"num_in_osds":2,"osd_in_since":1773068448,"num_remapped_pgs":0} 2026-03-09T15:00:59.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:59 vm03 ceph-mon[51263]: from='osd.1 [v2:192.168.123.103:6810/3334446627,v1:192.168.123.103:6811/3334446627]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:00:59.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:59 vm03 ceph-mon[51263]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T15:00:59.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:59 vm03 ceph-mon[51263]: from='osd.1 [v2:192.168.123.103:6810/3334446627,v1:192.168.123.103:6811/3334446627]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-09T15:00:59.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:59 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:00:59.584 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:00:59 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/471841841' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:00:59.584 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:00:59 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1[69393]: 2026-03-09T15:00:59.554+0000 7fa900aa1640 -1 osd.1 0 waiting for initial osdmap 2026-03-09T15:00:59.584 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:00:59 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1[69393]: 2026-03-09T15:00:59.560+0000 7fa8fc8cb640 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:01:00.510 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd stat -f json 2026-03-09T15:01:00.689 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:00.804 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: pgmap v19: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='osd.1 [v2:192.168.123.103:6810/3334446627,v1:192.168.123.103:6811/3334446627]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: osdmap e11: 2 total, 1 up, 2 in 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd.0", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd.1", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 
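The "Waiting for 2 OSDs to come up..." step simply re-runs `ceph osd stat -f json` until num_up_osds reaches num_osds (1 of 2 in the sample above, 2 of 2 in the next one). A minimal sketch of the same wait, assuming jq is available for JSON parsing (not something the test nodes necessarily have):

    until [ "$(sudo cephadm shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- \
                 ceph osd stat -f json | jq -r '.num_up_osds')" -ge 2 ]; do
        sleep 2
    done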
2026-03-09T15:01:00.805 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:00 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:01:01.023 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:01.282 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":12,"num_osds":2,"num_up_osds":2,"osd_up_since":1773068460,"num_in_osds":2,"osd_in_since":1773068448,"num_remapped_pgs":0} 2026-03-09T15:01:01.282 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd dump --format=json 2026-03-09T15:01:01.477 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:01.767 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:01.767 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":12,"fsid":"6884f6b8-1bc8-11f1-a1b7-432e3f447ddd","created":"2026-03-09T14:59:56.920803+0000","modified":"2026-03-09T15:01:00.559658+0000","last_up_change":"2026-03-09T15:01:00.559658+0000","last_in_change":"2026-03-09T15:00:48.810880+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"8669d207-052b-42b6-8a7e-94f5ee8d25c8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6803","nonce":1036806564}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6805","nonce":1036806564}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6809","nonce":1036806564}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6807","nonce":1036806564}]},"public_addr":"192.168.123.103:6803/1036806564","cluster_addr":"192.168.123.103:6805/1036806564","heartbeat_back_addr":"192.168.123.103:6809/1036806564","heartbeat_front_addr":"192.168.123.103:6807/1036806564","state":["exists","up"]},{"osd":1,"uuid":"b11c233a-dff6-4d35-97c7-c187f7f4116f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6811","nonce":3334446627}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6813","nonce":3334446627}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6817","nonce":3334446627}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","n
once":3334446627},{"type":"v1","addr":"192.168.123.103:6815","nonce":3334446627}]},"public_addr":"192.168.123.103:6811/3334446627","cluster_addr":"192.168.123.103:6813/3334446627","heartbeat_back_addr":"192.168.123.103:6817/3334446627","heartbeat_front_addr":"192.168.123.103:6815/3334446627","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:00:45.769445+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"0.000000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/3653846743":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/4146403003":"2026-03-10T15:00:21.390318+0000","192.168.123.103:6801/1056435028":"2026-03-10T15:00:21.390318+0000","192.168.123.103:6800/1056435028":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/1039282470":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/585439605":"2026-03-10T15:00:09.721665+0000","192.168.123.103:0/3182100101":"2026-03-10T15:00:09.721665+0000","192.168.123.103:0/988363636":"2026-03-10T15:00:09.721665+0000","192.168.123.103:6801/2923598611":"2026-03-10T15:00:09.721665+0000","192.168.123.103:6800/2923598611":"2026-03-10T15:00:09.721665+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: purged_snaps scrub starts 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: purged_snaps scrub ok 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: Detected new or changed devices on vm03 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: Adjusting osd_memory_target on vm03 to 128.5M 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: Unable to set osd_memory_target on vm03 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: osd.1 [v2:192.168.123.103:6810/3334446627,v1:192.168.123.103:6811/3334446627] boot 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: osdmap e12: 2 total, 2 up, 2 in 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:01:01.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:01 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1034085926' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:01:01.981 INFO:tasks.cephadm.ceph_manager.ceph:[] 2026-03-09T15:01:01.982 INFO:tasks.cephadm:Setting up client nodes... 
2026-03-09T15:01:01.982 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-09T15:01:02.185 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:02.544 INFO:teuthology.orchestra.run.vm03.stdout:[client.0] 2026-03-09T15:01:02.544 INFO:teuthology.orchestra.run.vm03.stdout: key = AQCu4K5pEbZfHxAAb68qlfVdkLzV1K19pvJOxw== 2026-03-09T15:01:02.705 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-09T15:01:02.705 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-09T15:01:02.705 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-09T15:01:02.742 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-09T15:01:02.743 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-09T15:01:02.743 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph mgr dump --format=json 2026-03-09T15:01:02.807 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:02 vm03 ceph-mon[51263]: pgmap v22: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:01:02.807 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:02 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3509103821' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:01:02.807 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:02 vm03 ceph-mon[51263]: osdmap e13: 2 total, 2 up, 2 in 2026-03-09T15:01:02.807 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:02 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3742395691' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:01:02.807 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:02 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/3742395691' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:01:02.965 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:03.287 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:03.463 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"flags":0,"active_gid":14150,"active_name":"x","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":3475918186},{"type":"v1","addr":"192.168.123.103:6801","nonce":3475918186}]},"active_addr":"192.168.123.103:6801/3475918186","active_change":"2026-03-09T15:00:21.390409+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[],"modules":["cephadm","dashboard","iostat","nfs","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP 
server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.103:8443/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":3,"active_clients":[{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":2490515517}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":1186070203}]},{"name":"volumes","addrvec":[{"type":"v2","addr":
"192.168.123.103:0","nonce":2486208004}]}]} 2026-03-09T15:01:03.463 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-09T15:01:03.463 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-09T15:01:03.464 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd dump --format=json 2026-03-09T15:01:03.709 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:03.982 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:03.982 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"fsid":"6884f6b8-1bc8-11f1-a1b7-432e3f447ddd","created":"2026-03-09T14:59:56.920803+0000","modified":"2026-03-09T15:01:01.764578+0000","last_up_change":"2026-03-09T15:01:00.559658+0000","last_in_change":"2026-03-09T15:00:48.810880+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"8669d207-052b-42b6-8a7e-94f5ee8d25c8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6803","nonce":1036806564}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6805","nonce":1036806564}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6809","nonce":1036806564}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6807","nonce":1036806564}]},"public_addr":"192.168.123.103:6803/1036806564","cluster_addr":"192.168.123.103:6805/1036806564","heartbeat_back_addr":"192.168.123.103:6809/1036806564","heartbeat_front_addr":"192.168.123.103:6807/1036806564","state":["exists","up"]},{"osd":1,"uuid":"b11c233a-dff6-4d35-97c7-c187f7f4116f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6811","nonce":3334446627}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6813","nonce":3334446627}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6817","nonce":3334446627}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6815","nonce":3334446627}]},"public_addr":"192.168.123.103:6811/3334446627","cluster_addr":"192.168.123.103:6813/3334446627","heartbeat_back_addr":"192.168.123.103:6817/3334446627","heartbeat_front_addr":"192.168.123.103:68
15/3334446627","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:00:45.769445+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:00:58.978746+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/3653846743":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/4146403003":"2026-03-10T15:00:21.390318+0000","192.168.123.103:6801/1056435028":"2026-03-10T15:00:21.390318+0000","192.168.123.103:6800/1056435028":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/1039282470":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/585439605":"2026-03-10T15:00:09.721665+0000","192.168.123.103:0/3182100101":"2026-03-10T15:00:09.721665+0000","192.168.123.103:0/988363636":"2026-03-10T15:00:09.721665+0000","192.168.123.103:6801/2923598611":"2026-03-10T15:00:09.721665+0000","192.168.123.103:6800/2923598611":"2026-03-10T15:00:09.721665+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T15:01:04.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:03 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1563434505' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T15:01:04.168 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
2026-03-09T15:01:04.168 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd dump --format=json 2026-03-09T15:01:04.362 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:04.654 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:04.654 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"fsid":"6884f6b8-1bc8-11f1-a1b7-432e3f447ddd","created":"2026-03-09T14:59:56.920803+0000","modified":"2026-03-09T15:01:01.764578+0000","last_up_change":"2026-03-09T15:01:00.559658+0000","last_in_change":"2026-03-09T15:00:48.810880+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":6,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":0,"max_osd":2,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[],"osds":[{"osd":0,"uuid":"8669d207-052b-42b6-8a7e-94f5ee8d25c8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6803","nonce":1036806564}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6805","nonce":1036806564}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6809","nonce":1036806564}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":1036806564},{"type":"v1","addr":"192.168.123.103:6807","nonce":1036806564}]},"public_addr":"192.168.123.103:6803/1036806564","cluster_addr":"192.168.123.103:6805/1036806564","heartbeat_back_addr":"192.168.123.103:6809/1036806564","heartbeat_front_addr":"192.168.123.103:6807/1036806564","state":["exists","up"]},{"osd":1,"uuid":"b11c233a-dff6-4d35-97c7-c187f7f4116f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6811","nonce":3334446627}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6813","nonce":3334446627}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6817","nonce":3334446627}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3334446627},{"type":"v1","addr":"192.168.123.103:6815","nonce":3334446627}]},"public_addr":"192.168.123.103:6811/3334446627","cluster_addr":"192.168.123.103:6813/3334446627","heartbeat_back_addr":"192.168.123.103:6817/3334446627","heartbeat_front_addr":"192.168.123.103:6815/3334446627","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":
"2026-03-09T15:00:45.769445+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:00:58.978746+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/3653846743":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/4146403003":"2026-03-10T15:00:21.390318+0000","192.168.123.103:6801/1056435028":"2026-03-10T15:00:21.390318+0000","192.168.123.103:6800/1056435028":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/1039282470":"2026-03-10T15:00:21.390318+0000","192.168.123.103:0/585439605":"2026-03-10T15:00:09.721665+0000","192.168.123.103:0/3182100101":"2026-03-10T15:00:09.721665+0000","192.168.123.103:0/988363636":"2026-03-10T15:00:09.721665+0000","192.168.123.103:6801/2923598611":"2026-03-10T15:00:09.721665+0000","192.168.123.103:6800/2923598611":"2026-03-10T15:00:09.721665+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T15:01:04.815 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph tell osd.0 flush_pg_stats 2026-03-09T15:01:04.815 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph tell osd.1 flush_pg_stats 2026-03-09T15:01:05.020 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:05.025 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:05.063 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:04 vm03 ceph-mon[51263]: pgmap v24: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:01:05.063 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:04 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1777635099' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:01:05.063 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:04 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/2390379620' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:01:05.462 INFO:teuthology.orchestra.run.vm03.stdout:51539607554 2026-03-09T15:01:05.462 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd last-stat-seq osd.1 2026-03-09T15:01:05.498 INFO:teuthology.orchestra.run.vm03.stdout:34359738373 2026-03-09T15:01:05.498 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph osd last-stat-seq osd.0 2026-03-09T15:01:05.696 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:05.839 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:06.035 INFO:teuthology.orchestra.run.vm03.stdout:51539607554 2026-03-09T15:01:06.158 INFO:teuthology.orchestra.run.vm03.stdout:34359738373 2026-03-09T15:01:06.247 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607554 got 51539607554 for osd.1 2026-03-09T15:01:06.248 DEBUG:teuthology.parallel:result is None 2026-03-09T15:01:06.343 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738373 got 34359738373 for osd.0 2026-03-09T15:01:06.343 DEBUG:teuthology.parallel:result is None 2026-03-09T15:01:06.343 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-09T15:01:06.343 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph pg dump --format=json 2026-03-09T15:01:06.548 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:06.784 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:06.785 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-09T15:01:06.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:06 vm03 ceph-mon[51263]: pgmap v25: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:01:06.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:06 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/1665399262' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T15:01:06.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:06 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/1373533495' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T15:01:06.974 INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":25,"stamp":"2026-03-09T15:01:05.511088+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":0,"num_osds":2,"num_per_pool_osds":2,"num_per_pool_omap_osds":0,"kb":41934848,"kb_used":53908,"kb_used_data":224,"kb_used_omap":3,"kb_used_meta":53628,"kb_avail":41880940,"statfs":{"total":42941284352,"available":42886082560,"internally_reserved":0,"allocated":229376,"data_stored":55664,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":3178,"internal_metadata":54915990},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"0.000000"},"pg_stats":[],"pool_stats":[],"osd_stats":[{"osd":1,"up_from":12,"seq":51539607554,"num_pgs":0,"num_os
ds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":27832,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738373,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26956,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940468,"statfs":{"total":21470642176,"available":21443039232,"internally_reserved":0,"allocated":114688,"data_stored":27832,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[]}} 2026-03-09T15:01:06.975 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph pg dump --format=json 2026-03-09T15:01:07.168 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:07.461 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:07.462 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-09T15:01:07.636 
INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":25,"stamp":"2026-03-09T15:01:05.511088+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":0,"num_osds":2,"num_per_pool_osds":2,"num_per_pool_omap_osds":0,"kb":41934848,"kb_used":53908,"kb_used_data":224,"kb_used_omap":3,"kb_used_meta":53628,"kb_avail":41880940,"statfs":{"total":42941284352,"available":42886082560,"internally_reserved":0,"allocated":229376,"data_stored":55664,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":3178,"internal_metadata":54915990},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"0.000000"},"pg_stats":[],"pool_stats":[],"osd_stats":[{"osd":1,"up_from":12,"seq":51539607554,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26952,"kb_used_data":112,"kb_used_omap":1,"kb_used_met
a":26814,"kb_avail":20940472,"statfs":{"total":21470642176,"available":21443043328,"internally_reserved":0,"allocated":114688,"data_stored":27832,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738373,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":26956,"kb_used_data":112,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940468,"statfs":{"total":21470642176,"available":21443039232,"internally_reserved":0,"allocated":114688,"data_stored":27832,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[]}} 2026-03-09T15:01:07.637 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-09T15:01:07.637 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-09T15:01:07.637 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-09T15:01:07.637 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph health --format=json 2026-03-09T15:01:07.829 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:08.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:07 vm03 ceph-mon[51263]: from='client.14224 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:08.092 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:08.092 INFO:teuthology.orchestra.run.vm03.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-09T15:01:08.271 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-09T15:01:08.271 INFO:tasks.cephadm:Setup complete, yielding 2026-03-09T15:01:08.271 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-09T15:01:08.274 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-09T15:01:08.274 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph fs volume create cephfs' 2026-03-09T15:01:08.459 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:09.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:08 vm03 ceph-mon[51263]: from='client.14226 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:09.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:08 vm03 ceph-mon[51263]: pgmap v26: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:01:09.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:08 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/382060550' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T15:01:09.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:08 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"}]: dispatch 2026-03-09T15:01:10.060 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-09T15:01:10.062 INFO:tasks.cephadm:Waiting for ceph service mds.cephfs to start (timeout 300)... 2026-03-09T15:01:10.062 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:10.062 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:09 vm03 ceph-mon[51263]: from='client.14230 -' entity='client.admin' cmd=[{"prefix": "fs volume create", "name": "cephfs", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:01:10.062 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:09 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd='[{"prefix": "osd pool create", "pool": "cephfs.cephfs.meta"}]': finished 2026-03-09T15:01:10.062 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:09 vm03 ceph-mon[51263]: osdmap e14: 2 total, 2 up, 2 in 2026-03-09T15:01:10.062 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:09 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"}]: dispatch 2026-03-09T15:01:10.062 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:09 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a[51238]: 2026-03-09T15:01:09.823+0000 7f1ad7843640 -1 log_channel(cluster) log [ERR] : Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-09T15:01:10.380 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:10.938 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:10.938 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:09.837677Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", 
"status": {"created": "2026-03-09T15:01:09.834347Z", "running": 0, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:00:57.458160Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:00:57.458106Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:00:57.458195Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: pgmap v28: 32 pgs: 32 unknown; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED) 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd='[{"bulk": true, "prefix": "osd pool create", "pool": "cephfs.cephfs.data"}]': finished 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: osdmap e15: 2 total, 2 up, 2 in 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]: dispatch 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: Health check failed: 1 filesystem is offline (MDS_ALL_DOWN) 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX) 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd='[{"prefix": "fs new", "fs_name": "cephfs", "metadata": "cephfs.cephfs.meta", "data": "cephfs.cephfs.data"}]': finished 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: osdmap e16: 2 total, 2 up, 2 in 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: fsmap cephfs:0 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: Saving service mds.cephfs spec with placement count:2 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.prsrzf", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd='[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.prsrzf", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:11.075 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:10 vm03 ceph-mon[51263]: Deploying daemon mds.cephfs.vm03.prsrzf on vm03 2026-03-09T15:01:11.109 INFO:tasks.cephadm:mds.cephfs has 0/2 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: osdmap e17: 2 total, 2 up, 2 in 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.lcbhgx", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]: dispatch 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd='[{"prefix": "auth get-or-create", "entity": "mds.cephfs.vm03.lcbhgx", "caps": ["mon", "profile mds", "osd", "allow rw tag cephfs *=*", "mds", "allow"]}]': finished 2026-03-09T15:01:11.957 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:11 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:12.110 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph 
orch ls -f json 2026-03-09T15:01:12.321 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:12.626 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:12.626 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "running": 0, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:00:57.458160Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:00:57.458106Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:00:57.458195Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-09T15:01:12.789 INFO:tasks.cephadm:mds.cephfs has 0/2 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='client.14232 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: Deploying daemon mds.cephfs.vm03.lcbhgx on vm03 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: pgmap v32: 64 pgs: 9 creating+peering, 55 unknown; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: mds.? 
[v2:192.168.123.103:6818/1971741585,v1:192.168.123.103:6819/1971741585] up:boot 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: daemon mds.cephfs.vm03.prsrzf assigned to filesystem cephfs as rank 0 (now has 1 ranks) 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: Health check cleared: MDS_ALL_DOWN (was: 1 filesystem is offline) 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: Health check cleared: MDS_UP_LESS_THAN_MAX (was: 1 filesystem is online with fewer MDS than max_mds) 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: fsmap cephfs:0 1 up:standby 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mds metadata", "who": "cephfs.vm03.prsrzf"}]: dispatch 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: fsmap cephfs:1 {0=cephfs.vm03.prsrzf=up:creating} 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: daemon mds.cephfs.vm03.prsrzf is now active in filesystem cephfs as rank 0 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled) 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: Cluster is now healthy 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:13.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:12 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:01:13.789 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:13.986 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:14.067 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='client.14238 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: mds.? 
[v2:192.168.123.103:6818/1971741585,v1:192.168.123.103:6819/1971741585] up:active 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: mds.? [v2:192.168.123.103:6820/3147019051,v1:192.168.123.103:6821/3147019051] up:boot 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: fsmap cephfs:1 {0=cephfs.vm03.prsrzf=up:active} 1 up:standby 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "mds metadata", "who": "cephfs.vm03.lcbhgx"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: fsmap cephfs:1 {0=cephfs.vm03.prsrzf=up:active} 1 up:standby 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:14.068 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:13 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:14.255 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:14.255 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": 
"mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}] 2026-03-09T15:01:14.414 INFO:tasks.cephadm:mds.cephfs has 2/2 2026-03-09T15:01:14.414 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T15:01:14.416 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-09T15:01:14.416 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph fs subvolumegroup create cephfs g1' 2026-03-09T15:01:14.608 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:14.894 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:14 vm03 ceph-mon[51263]: pgmap v33: 64 pgs: 39 active+undersized, 9 creating+peering, 16 unknown; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:01:14.895 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:01:14 vm03 ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x[51495]: 2026-03-09T15:01:14.870+0000 7fe1e952f640 -1 client.14244 error registering admin socket command: (17) File exists 2026-03-09T15:01:15.039 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph fs subvolume create cephfs sub1 --group-name=g1 --mode=0777' 2026-03-09T15:01:15.227 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:15.695 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph fs authorize cephfs client.smbdata / rw' 2026-03-09T15:01:15.899 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:16.317 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:15 vm03 ceph-mon[51263]: from='client.14240 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:16.317 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:15 vm03 ceph-mon[51263]: from='client.14242 -' entity='client.admin' cmd=[{"prefix": "fs subvolumegroup create", "vol_name": "cephfs", "group_name": "g1", "target": 
["mon-mgr", ""]}]: dispatch 2026-03-09T15:01:16.318 INFO:teuthology.orchestra.run.vm03.stdout:[client.smbdata] 2026-03-09T15:01:16.318 INFO:teuthology.orchestra.run.vm03.stdout: key = AQC84K5plJ8BChAAbcw3Ar2enmGyvlnQXPc2zA== 2026-03-09T15:01:16.318 INFO:teuthology.orchestra.run.vm03.stdout: caps mds = "allow rw fsname=cephfs" 2026-03-09T15:01:16.318 INFO:teuthology.orchestra.run.vm03.stdout: caps mon = "allow r fsname=cephfs" 2026-03-09T15:01:16.318 INFO:teuthology.orchestra.run.vm03.stdout: caps osd = "allow rw tag cephfs data=cephfs" 2026-03-09T15:01:16.806 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph osd pool create .smb --yes-i-really-mean-it' 2026-03-09T15:01:16.984 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: pgmap v34: 64 pgs: 3 active+undersized+degraded, 61 active+undersized; 592 B data, 53 MiB used, 40 GiB / 40 GiB avail; 7/21 objects degraded (33.333%) 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: from='client.14246 -' entity='client.admin' cmd=[{"prefix": "fs subvolume create", "vol_name": "cephfs", "sub_name": "sub1", "group_name": "g1", "mode": "0777", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: Health check failed: Degraded data redundancy: 7/21 objects degraded (33.333%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: mgrmap e14: x(active, since 54s) 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/616616626' entity='client.admin' cmd=[{"prefix": "fs authorize", "filesystem": "cephfs", "entity": "client.smbdata", "caps": ["/", "rw"]}]: dispatch 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/616616626' entity='client.admin' cmd='[{"prefix": "fs authorize", "filesystem": "cephfs", "entity": "client.smbdata", "caps": ["/", "rw"]}]': finished 2026-03-09T15:01:17.374 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:17 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:18.181 INFO:teuthology.orchestra.run.vm03.stderr:pool '.smb' created 2026-03-09T15:01:18.452 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:18 vm03 ceph-mon[51263]: mds.? [v2:192.168.123.103:6818/1971741585,v1:192.168.123.103:6819/1971741585] up:active 2026-03-09T15:01:18.452 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:18 vm03 ceph-mon[51263]: mds.? 
[v2:192.168.123.103:6820/3147019051,v1:192.168.123.103:6821/3147019051] up:standby 2026-03-09T15:01:18.452 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:18 vm03 ceph-mon[51263]: fsmap cephfs:1 {0=cephfs.vm03.prsrzf=up:active} 1 up:standby 2026-03-09T15:01:18.452 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:18 vm03 ceph-mon[51263]: pgmap v35: 64 pgs: 11 active+undersized+degraded, 53 active+undersized; 28 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 22 KiB/s wr, 18 op/s; 23/69 objects degraded (33.333%) 2026-03-09T15:01:18.452 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:18 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/708727007' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": ".smb", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T15:01:18.453 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph osd pool application enable .smb smb' 2026-03-09T15:01:18.645 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:19.206 INFO:teuthology.orchestra.run.vm03.stderr:enabled application 'smb' on pool '.smb' 2026-03-09T15:01:19.421 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'rados --pool=.smb --namespace=admem1 put conf.toml /dev/stdin' 2026-03-09T15:01:19.446 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:19 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/708727007' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": ".smb", "yes_i_really_mean_it": true}]': finished 2026-03-09T15:01:19.447 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:19 vm03 ceph-mon[51263]: osdmap e18: 2 total, 2 up, 2 in 2026-03-09T15:01:19.447 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:19 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/3964866894' entity='client.admin' cmd=[{"prefix": "osd pool application enable", "pool": ".smb", "app": "smb"}]: dispatch 2026-03-09T15:01:19.606 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:19.928 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- bash -c 'ceph config-key set smb/config/admem1/join1.json -i -' 2026-03-09T15:01:20.117 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:20.437 INFO:teuthology.orchestra.run.vm03.stderr:set smb/config/admem1/join1.json 2026-03-09T15:01:20.437 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:20 vm03 ceph-mon[51263]: from='client.? 
192.168.123.103:0/3964866894' entity='client.admin' cmd='[{"prefix": "osd pool application enable", "pool": ".smb", "app": "smb"}]': finished 2026-03-09T15:01:20.437 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:20 vm03 ceph-mon[51263]: osdmap e19: 2 total, 2 up, 2 in 2026-03-09T15:01:20.437 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:20 vm03 ceph-mon[51263]: pgmap v38: 96 pgs: 32 unknown, 11 active+undersized+degraded, 53 active+undersized; 28 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 11 KiB/s wr, 9 op/s; 23/69 objects degraded (33.333%) 2026-03-09T15:01:20.629 INFO:teuthology.run_tasks:Running task cephadm.apply... 2026-03-09T15:01:20.633 INFO:tasks.cephadm:Applying spec(s): cluster_id: admem1 config_uri: rados://.smb/admem1/conf.toml custom_dns: - 192.168.123.104 features: - domain include_ceph_users: - client.smbdata join_sources: - rados:mon-config-key:smb/config/admem1/join1.json placement: count: 1 service_id: admem1 service_type: smb 2026-03-09T15:01:20.633 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch apply -i - 2026-03-09T15:01:20.823 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:21.149 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled smb.admem1 update... 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: osdmap e20: 2 total, 2 up, 2 in 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='client.? 192.168.123.103:0/2152169216' entity='client.admin' 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:21.428 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:21.429 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.smb.config.admem1.vm03.fpeqaw", "caps": ["mon", "allow r, allow command \"config-key get\" with \"key\" prefix \"smb/config/admem1/\"", "osd", "allow r pool=.smb"]}]: dispatch 2026-03-09T15:01:21.429 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd='[{"prefix": "auth get-or-create", "entity": "client.smb.config.admem1.vm03.fpeqaw", "caps": ["mon", "allow r, allow command \"config-key get\" with \"key\" prefix 
\"smb/config/admem1/\"", "osd", "allow r pool=.smb"]}]': finished 2026-03-09T15:01:21.429 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.smbdata"}]: dispatch 2026-03-09T15:01:21.429 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:21 vm03 ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:21.587 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-09T15:01:21.589 INFO:tasks.cephadm:Waiting for ceph service smb.admem1 to start (timeout 300)... 2026-03-09T15:01:21.590 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:21.829 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:22.312 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:22.312 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:21.147560Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:22.454 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:22 vm03 ceph-mon[51263]: from='client.14259 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:01:22.454 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:22 vm03 ceph-mon[51263]: Saving service 
smb.admem1 spec with placement count:1 2026-03-09T15:01:22.454 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:22 vm03 ceph-mon[51263]: Deploying daemon smb.admem1.vm03.fpeqaw on vm03 2026-03-09T15:01:22.454 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:22 vm03 ceph-mon[51263]: pgmap v40: 96 pgs: 18 unknown, 11 active+undersized+degraded, 67 active+undersized; 28 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 7.5 KiB/s wr, 6 op/s; 23/69 objects degraded (33.333%) 2026-03-09T15:01:22.486 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:23.487 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:23.693 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:23.810 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:23 vm03 ceph-mon[51263]: from='client.14261 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:24.301 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:24.301 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:21.147560Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:24.505 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:24.583 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:24 vm03 ceph-mon[51263]: pgmap v41: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 
2.0 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:24.599 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:24 vm03 ceph-mon[51263]: Health check update: Degraded data redundancy: 23/69 objects degraded (33.333%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T15:01:25.505 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:25.543 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:25 vm03 ceph-mon[51263]: from='client.14263 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:25.729 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:26.058 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:26.058 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:21.147560Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:26.565 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:26.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:26 vm03 ceph-mon[51263]: pgmap v42: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 1.6 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:27.566 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k 
/etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:27.831 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:27.890 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:27 vm03 ceph-mon[51263]: from='client.14265 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:28.165 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:28.165 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:21.147560Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:28.350 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:29.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:28 vm03 ceph-mon[51263]: pgmap v43: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 1.4 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:29.333 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:28 vm03 ceph-mon[51263]: Health check update: Degraded data redundancy: 24/72 objects degraded (33.333%), 15 pgs degraded (PG_DEGRADED) 2026-03-09T15:01:29.351 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:29.576 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 
2026-03-09T15:01:30.132 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:30.132 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:21.147560Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:30.298 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:30 vm03 ceph-mon[51263]: from='client.14267 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:30.298 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:30 vm03 ceph-mon[51263]: pgmap v44: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 1.2 KiB/s wr, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:30.359 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:31.207 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:31 vm03.local ceph-mon[51263]: from='client.14269 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:31.359 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:32.579 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:32.798 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:32 vm03.local ceph-mon[51263]: pgmap v45: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 36 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 1.1 KiB/s wr, 0 op/s; 
24/72 objects degraded (33.333%) 2026-03-09T15:01:32.916 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:32.916 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:21.147560Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:33.123 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:33.622 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:33 vm03.local ceph-mon[51263]: from='client.? 
10.88.0.3:0/2712274443' entity='client.smb.config.admem1.vm03.fpeqaw' cmd=[{"prefix": "config-key get", "key": "smb/config/admem1/join1.json"}]: dispatch 2026-03-09T15:01:34.124 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:34.689 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: from='client.14273 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: pgmap v46: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 85 B/s rd, 1.2 KiB/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:34.732 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:34 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:01:35.088 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:35.088 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:13.490304Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:13.490215Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:13.490165Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:13.490247Z", "running": 2, "size": 2}, "unmanaged": true}, 
{"events": ["2026-03-09T15:01:34.070269Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "running": 0, "size": 1}}] 2026-03-09T15:01:35.273 INFO:tasks.cephadm:smb.admem1 has 0/1 2026-03-09T15:01:36.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:35 vm03.local ceph-mon[51263]: config is a no-op 2026-03-09T15:01:36.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:35 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:36.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:35 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:36.274 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph orch ls -f json 2026-03-09T15:01:36.504 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='client.14285 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: pgmap v47: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 85 B/s rd, 255 B/s wr, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:01:36.833 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:36.834 
INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:36 vm03.local ceph-mon[51263]: from='mgr.14150 192.168.123.103:0/3781887872' entity='mgr.x' 2026-03-09T15:01:36.839 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-09T15:01:36.839 INFO:teuthology.orchestra.run.vm03.stdout:[{"events": ["2026-03-09T15:01:12.081472Z service:mds.cephfs [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "cephfs", "service_name": "mds.cephfs", "service_type": "mds", "status": {"created": "2026-03-09T15:01:09.834347Z", "last_refresh": "2026-03-09T15:01:35.378477Z", "running": 2, "size": 2}}, {"events": ["2026-03-09T15:00:30.716990Z service:mgr [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03=x"]}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-09T15:00:30.705286Z", "last_refresh": "2026-03-09T15:01:35.378336Z", "running": 1, "size": 1}}, {"events": ["2026-03-09T15:00:28.529747Z service:mon [INFO] \"service was created\""], "placement": {"count": 1, "hosts": ["vm03:192.168.123.103=a"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-09T15:00:28.496555Z", "last_refresh": "2026-03-09T15:01:35.378264Z", "running": 1, "size": 1}}, {"service_name": "osd", "service_type": "osd", "spec": {"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"container_image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c", "container_image_name": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", "last_refresh": "2026-03-09T15:01:35.378385Z", "running": 2, "size": 2}, "unmanaged": true}, {"events": ["2026-03-09T15:01:34.070269Z service:smb.admem1 [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "admem1", "service_name": "smb.admem1", "service_type": "smb", "spec": {"cluster_id": "admem1", "config_uri": "rados://.smb/admem1/conf.toml", "custom_dns": ["192.168.123.104"], "features": ["domain"], "include_ceph_users": ["client.smbdata"], "join_sources": ["rados:mon-config-key:smb/config/admem1/join1.json"]}, "status": {"created": "2026-03-09T15:01:21.144567Z", "last_refresh": "2026-03-09T15:01:35.378564Z", "running": 1, "size": 1}}] 2026-03-09T15:01:37.015 INFO:tasks.cephadm:smb.admem1 has 1/1 2026-03-09T15:01:37.015 INFO:teuthology.run_tasks:Running task cephadm.exec... 
2026-03-09T15:01:37.023 INFO:tasks.cephadm:Running commands on role host.b host ubuntu@vm04.local 2026-03-09T15:01:37.023 DEBUG:teuthology.orchestra.run.vm04:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sleep 30' 2026-03-09T15:01:37.051 INFO:teuthology.orchestra.run.vm04.stderr:+ sleep 30 2026-03-09T15:01:38.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:37 vm03.local ceph-mon[51263]: from='client.14291 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:01:39.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:38 vm03.local ceph-mon[51263]: pgmap v48: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 511 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:41.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:40 vm03.local ceph-mon[51263]: pgmap v49: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 511 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:43.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:42 vm03.local ceph-mon[51263]: pgmap v50: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 511 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:45.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:44 vm03.local ceph-mon[51263]: pgmap v51: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 938 B/s rd, 255 B/s wr, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:47.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:46 vm03.local ceph-mon[51263]: pgmap v52: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 853 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:49.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:48 vm03.local ceph-mon[51263]: pgmap v53: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 1.2 KiB/s rd, 2 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:51.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:50 vm03.local ceph-mon[51263]: pgmap v54: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 853 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:53.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:52 vm03.local ceph-mon[51263]: pgmap v55: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 853 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:55.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:54 vm03.local ceph-mon[51263]: pgmap v56: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 938 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:57.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:56 vm03.local ceph-mon[51263]: pgmap v57: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 511 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:01:59.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:01:58 vm03.local ceph-mon[51263]: pgmap v58: 96 pgs: 15 active+undersized+degraded, 81 
active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 597 B/s rd, 1 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:02:01.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:00 vm03.local ceph-mon[51263]: pgmap v59: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 170 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:02:03.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:02 vm03.local ceph-mon[51263]: pgmap v60: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 170 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:02:05.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:04 vm03.local ceph-mon[51263]: pgmap v61: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 255 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:02:07.054 DEBUG:teuthology.orchestra.run.vm04:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U DOMAIN1\\ckent%1115Rose. //192.168.123.103/share1 -c ls' 2026-03-09T15:02:07.083 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:06 vm03.local ceph-mon[51263]: pgmap v62: 96 pgs: 15 active+undersized+degraded, 81 active+undersized; 38 KiB data, 53 MiB used, 40 GiB / 40 GiB avail; 170 B/s rd, 0 op/s; 24/72 objects degraded (33.333%) 2026-03-09T15:02:07.118 INFO:teuthology.orchestra.run.vm04.stderr:+ sudo podman run --rm --net=host --dns=192.168.123.104 -eKRB5_CONFIG=/dev/null quay.io/samba.org/samba-client:latest smbclient -U 'DOMAIN1\ckent%1115Rose.' //192.168.123.103/share1 -c ls 2026-03-09T15:02:07.314 INFO:teuthology.orchestra.run.vm04.stdout: . D 0 Mon Mar 9 15:01:14 2026 2026-03-09T15:02:07.314 INFO:teuthology.orchestra.run.vm04.stdout: .. D 0 Mon Mar 9 15:01:14 2026 2026-03-09T15:02:07.314 INFO:teuthology.orchestra.run.vm04.stdout: volumes D 0 Mon Mar 9 15:01:15 2026 2026-03-09T15:02:07.314 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T15:02:07.314 INFO:teuthology.orchestra.run.vm04.stdout: 13258752 blocks of size 1024. 13258752 blocks available 2026-03-09T15:02:07.441 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-09T15:02:07.444 INFO:tasks.cephadm:Teardown begin 2026-03-09T15:02:07.444 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:02:07.476 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:02:07.513 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-09T15:02:07.514 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd -- ceph mgr module disable cephadm 2026-03-09T15:02:07.722 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/mon.a/config 2026-03-09T15:02:07.745 INFO:teuthology.orchestra.run.vm03.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-03-09T15:02:07.770 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-09T15:02:07.771 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 
2026-03-09T15:02:07.771 DEBUG:teuthology.orchestra.run.vm03:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T15:02:07.790 DEBUG:teuthology.orchestra.run.vm04:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T15:02:07.806 INFO:tasks.cephadm:Stopping all daemons... 2026-03-09T15:02:07.806 INFO:tasks.cephadm.mon.a:Stopping mon.a... 2026-03-09T15:02:07.806 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a 2026-03-09T15:02:08.132 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:07 vm03.local systemd[1]: Stopping Ceph mon.a for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd... 2026-03-09T15:02:08.132 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:07 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a[51238]: 2026-03-09T15:02:07.947+0000 7f1add04e640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:02:08.132 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:07 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a[51238]: 2026-03-09T15:02:07.947+0000 7f1add04e640 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-09T15:02:08.132 INFO:journalctl@ceph.mon.a.vm03.stdout:Mar 09 15:02:08 vm03.local podman[79477]: 2026-03-09 15:02:08.020155059 +0000 UTC m=+0.088431969 container died 1cb95ecfe67db391dd4eb859d15437cb1101e8f22b0674a5d83a9dbaa2f8be5d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mon-a, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0) 2026-03-09T15:02:08.223 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mon.a.service' 2026-03-09T15:02:08.264 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:02:08.264 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-09T15:02:08.264 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-09T15:02:08.264 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x 2026-03-09T15:02:08.470 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:02:08 vm03.local systemd[1]: Stopping Ceph mgr.x for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd... 
2026-03-09T15:02:08.724 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:02:08 vm03.local podman[79591]: 2026-03-09 15:02:08.477563284 +0000 UTC m=+0.066801772 container died c2d75d3874e5a65b1be100c56412819d71d93c05fa46fef4b738dd18a819c0f4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid) 2026-03-09T15:02:08.724 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:02:08 vm03.local podman[79591]: 2026-03-09 15:02:08.65742915 +0000 UTC m=+0.246667638 container remove c2d75d3874e5a65b1be100c56412819d71d93c05fa46fef4b738dd18a819c0f4 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3) 2026-03-09T15:02:08.725 INFO:journalctl@ceph.mgr.x.vm03.stdout:Mar 09 15:02:08 vm03.local bash[79591]: ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-mgr-x 2026-03-09T15:02:08.729 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@mgr.x.service' 2026-03-09T15:02:08.760 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:02:08.760 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-09T15:02:08.761 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-09T15:02:08.761 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.0 2026-03-09T15:02:09.084 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:08 vm03.local systemd[1]: Stopping Ceph osd.0 for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd... 
2026-03-09T15:02:09.084 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:08 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0[63825]: 2026-03-09T15:02:08.912+0000 7ff1dd3c7640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:02:09.084 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:08 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0[63825]: 2026-03-09T15:02:08.912+0000 7ff1dd3c7640 -1 osd.0 20 *** Got signal Terminated *** 2026-03-09T15:02:09.084 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:08 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0[63825]: 2026-03-09T15:02:08.912+0000 7ff1dd3c7640 -1 osd.0 20 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:02:14.237 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:13 vm03.local podman[79707]: 2026-03-09 15:02:13.945099172 +0000 UTC m=+5.046488845 container died a453c29bcf53a8fd63b8eea717c395ef37f34c35263638845f8c748004c00eab (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0) 2026-03-09T15:02:14.238 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79707]: 2026-03-09 15:02:14.079564226 +0000 UTC m=+5.180953899 container remove a453c29bcf53a8fd63b8eea717c395ef37f34c35263638845f8c748004c00eab (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:02:14.238 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local bash[79707]: ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79789]: 2026-03-09 15:02:14.23777041 +0000 UTC m=+0.019876395 container create 4b8a6683deaa1f2d9c48b32b8fd0e2c91f7abe49bfaddde83fcbe965c3f41db0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0-deactivate, 
org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79789]: 2026-03-09 15:02:14.276935589 +0000 UTC m=+0.059041585 container init 4b8a6683deaa1f2d9c48b32b8fd0e2c91f7abe49bfaddde83fcbe965c3f41db0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79789]: 2026-03-09 15:02:14.283681046 +0000 UTC m=+0.065787042 container start 4b8a6683deaa1f2d9c48b32b8fd0e2c91f7abe49bfaddde83fcbe965c3f41db0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79789]: 2026-03-09 15:02:14.284903354 +0000 UTC m=+0.067009350 container attach 4b8a6683deaa1f2d9c48b32b8fd0e2c91f7abe49bfaddde83fcbe965c3f41db0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0-deactivate, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True) 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79789]: 2026-03-09 15:02:14.229255672 +0000 UTC m=+0.011361678 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local conmon[79800]: conmon 4b8a6683deaa1f2d9c48 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-4b8a6683deaa1f2d9c48b32b8fd0e2c91f7abe49bfaddde83fcbe965c3f41db0.scope/memory.events 2026-03-09T15:02:14.549 INFO:journalctl@ceph.osd.0.vm03.stdout:Mar 09 15:02:14 vm03.local podman[79789]: 2026-03-09 15:02:14.432060806 +0000 UTC m=+0.214166802 container died 4b8a6683deaa1f2d9c48b32b8fd0e2c91f7abe49bfaddde83fcbe965c3f41db0 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default) 2026-03-09T15:02:14.572 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.0.service' 2026-03-09T15:02:14.609 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:02:14.609 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-09T15:02:14.609 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-09T15:02:14.609 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.1 2026-03-09T15:02:14.803 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:14 vm03.local systemd[1]: Stopping Ceph osd.1 for 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd... 
2026-03-09T15:02:15.083 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:14 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1[69393]: 2026-03-09T15:02:14.801+0000 7fa901ab5640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:02:15.083 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:14 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1[69393]: 2026-03-09T15:02:14.801+0000 7fa901ab5640 -1 osd.1 20 *** Got signal Terminated *** 2026-03-09T15:02:15.083 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:14 vm03.local ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1[69393]: 2026-03-09T15:02:14.801+0000 7fa901ab5640 -1 osd.1 20 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:02:20.125 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:19 vm03.local podman[79909]: 2026-03-09 15:02:19.839486066 +0000 UTC m=+5.083880254 container died 9da9354732d3969b4ea9e77e2f686408fb89de258d80bb155f5d68dd908295c4 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:02:20.125 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:19 vm03.local podman[79909]: 2026-03-09 15:02:19.967092253 +0000 UTC m=+5.211486441 container remove 9da9354732d3969b4ea9e77e2f686408fb89de258d80bb155f5d68dd908295c4 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:02:20.125 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:19 vm03.local bash[79909]: ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1 2026-03-09T15:02:20.433 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:20 vm03.local podman[79986]: 2026-03-09 15:02:20.125210199 +0000 UTC m=+0.017890409 container create dd05125fd3340c10578eafa08c058fbdde69ca6aaaa16358293fc01d0bef3598 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1-deactivate, 
org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:02:20.433 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:20 vm03.local podman[79986]: 2026-03-09 15:02:20.166327243 +0000 UTC m=+0.059007453 container init dd05125fd3340c10578eafa08c058fbdde69ca6aaaa16358293fc01d0bef3598 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1-deactivate, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:02:20.433 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:20 vm03.local podman[79986]: 2026-03-09 15:02:20.172681648 +0000 UTC m=+0.065361858 container start dd05125fd3340c10578eafa08c058fbdde69ca6aaaa16358293fc01d0bef3598 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1-deactivate, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T15:02:20.433 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:20 vm03.local podman[79986]: 2026-03-09 15:02:20.17567455 +0000 UTC m=+0.068354769 container attach dd05125fd3340c10578eafa08c058fbdde69ca6aaaa16358293fc01d0bef3598 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, 
org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T15:02:20.433 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:20 vm03.local podman[79986]: 2026-03-09 15:02:20.11823862 +0000 UTC m=+0.010918830 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T15:02:20.433 INFO:journalctl@ceph.osd.1.vm03.stdout:Mar 09 15:02:20 vm03.local podman[79986]: 2026-03-09 15:02:20.317249433 +0000 UTC m=+0.209929643 container died dd05125fd3340c10578eafa08c058fbdde69ca6aaaa16358293fc01d0bef3598 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, ceph=True, org.label-schema.vendor=CentOS) 2026-03-09T15:02:20.456 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-6884f6b8-1bc8-11f1-a1b7-432e3f447ddd@osd.1.service' 2026-03-09T15:02:20.493 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:02:20.493 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-09T15:02:20.493 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd --force --keep-logs 2026-03-09T15:02:20.670 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:02:31.648 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd --force --keep-logs 2026-03-09T15:02:31.784 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:02:31.845 INFO:teuthology.orchestra.run.vm04.stderr:Traceback (most recent call last): 2026-03-09T15:02:31.845 INFO:teuthology.orchestra.run.vm04.stderr: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T15:02:31.848 INFO:teuthology.orchestra.run.vm04.stderr: return _run_code(code, main_globals, None, 2026-03-09T15:02:31.848 INFO:teuthology.orchestra.run.vm04.stderr: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T15:02:31.848 INFO:teuthology.orchestra.run.vm04.stderr: exec(code, run_globals) 2026-03-09T15:02:31.848 INFO:teuthology.orchestra.run.vm04.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T15:02:31.848 INFO:teuthology.orchestra.run.vm04.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T15:02:31.848 INFO:teuthology.orchestra.run.vm04.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 4338, in command_rm_cluster 2026-03-09T15:02:31.848 
INFO:teuthology.orchestra.run.vm04.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 4402, in _rm_cluster 2026-03-09T15:02:31.849 INFO:teuthology.orchestra.run.vm04.stderr: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 4328, in get_ceph_cluster_count 2026-03-09T15:02:31.849 INFO:teuthology.orchestra.run.vm04.stderr:FileNotFoundError: [Errno 2] No such file or directory: '/var/lib/ceph' 2026-03-09T15:02:31.860 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:02:31.861 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:02:31.896 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:02:31.923 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-09T15:02:31.923 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516/remote/vm03/crash 2026-03-09T15:02:31.923 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/crash -- . 2026-03-09T15:02:31.967 INFO:teuthology.orchestra.run.vm03.stderr:tar: /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/crash: Cannot open: No such file or directory 2026-03-09T15:02:31.968 INFO:teuthology.orchestra.run.vm03.stderr:tar: Error is not recoverable: exiting now 2026-03-09T15:02:31.969 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516/remote/vm04/crash 2026-03-09T15:02:31.969 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/crash -- . 2026-03-09T15:02:31.995 INFO:teuthology.orchestra.run.vm04.stderr:tar: /var/lib/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/crash: Cannot open: No such file or directory 2026-03-09T15:02:31.995 INFO:teuthology.orchestra.run.vm04.stderr:tar: Error is not recoverable: exiting now 2026-03-09T15:02:31.996 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-09T15:02:31.996 DEBUG:teuthology.orchestra.run.vm03:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | head -n 1 2026-03-09T15:02:32.037 INFO:tasks.cephadm:Compressing logs... 
2026-03-09T15:02:32.038 DEBUG:teuthology.orchestra.run.vm03:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T15:02:32.079 DEBUG:teuthology.orchestra.run.vm04:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T15:02:32.101 INFO:teuthology.orchestra.run.vm03.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T15:02:32.101 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T15:02:32.102 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mon.a.log 2026-03-09T15:02:32.102 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.log 2026-03-09T15:02:32.103 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mon.a.log: 91.2% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T15:02:32.104 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.audit.log 2026-03-09T15:02:32.104 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T15:02:32.104 INFO:teuthology.orchestra.run.vm04.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T15:02:32.105 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.log: 84.3% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.log.gz 2026-03-09T15:02:32.106 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/cephadm.log: 70.7% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T15:02:32.107 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-09T15:02:32.107 INFO:teuthology.orchestra.run.vm04.stderr:real 0m0.013s 2026-03-09T15:02:32.107 INFO:teuthology.orchestra.run.vm04.stderr:user 0m0.005s 2026-03-09T15:02:32.107 INFO:teuthology.orchestra.run.vm04.stderr:sys 0m0.013s 2026-03-09T15:02:32.110 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mgr.x.log 2026-03-09T15:02:32.110 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.audit.log: 88.8% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.audit.log.gz 2026-03-09T15:02:32.111 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.cephadm.log 2026-03-09T15:02:32.118 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mgr.x.log: gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-volume.log 2026-03-09T15:02:32.118 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.cephadm.log: 76.0% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph.cephadm.log.gz 2026-03-09T15:02:32.121 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-osd.0.log 2026-03-09T15:02:32.129 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-volume.log: 95.1% -- 
replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-volume.log.gz 2026-03-09T15:02:32.129 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-osd.1.log 2026-03-09T15:02:32.135 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-osd.0.log: 89.5% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mgr.x.log.gz 2026-03-09T15:02:32.135 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mds.cephfs.vm03.prsrzf.log 2026-03-09T15:02:32.141 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mds.cephfs.vm03.lcbhgx.log 2026-03-09T15:02:32.141 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mds.cephfs.vm03.prsrzf.log: 78.1% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mds.cephfs.vm03.prsrzf.log.gz 2026-03-09T15:02:32.144 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mds.cephfs.vm03.lcbhgx.log: 70.4% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mds.cephfs.vm03.lcbhgx.log.gz 2026-03-09T15:02:32.195 INFO:teuthology.orchestra.run.vm03.stderr: 91.1% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-mon.a.log.gz 2026-03-09T15:02:32.226 INFO:teuthology.orchestra.run.vm03.stderr: 95.1% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-osd.1.log.gz 2026-03-09T15:02:32.232 INFO:teuthology.orchestra.run.vm03.stderr: 95.3% -- replaced with /var/log/ceph/6884f6b8-1bc8-11f1-a1b7-432e3f447ddd/ceph-osd.0.log.gz 2026-03-09T15:02:32.234 INFO:teuthology.orchestra.run.vm03.stderr: 2026-03-09T15:02:32.234 INFO:teuthology.orchestra.run.vm03.stderr:real 0m0.142s 2026-03-09T15:02:32.234 INFO:teuthology.orchestra.run.vm03.stderr:user 0m0.243s 2026-03-09T15:02:32.234 INFO:teuthology.orchestra.run.vm03.stderr:sys 0m0.024s 2026-03-09T15:02:32.235 INFO:tasks.cephadm:Archiving logs... 2026-03-09T15:02:32.235 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516/remote/vm03/log 2026-03-09T15:02:32.235 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T15:02:32.317 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516/remote/vm04/log 2026-03-09T15:02:32.317 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T15:02:32.342 INFO:tasks.cephadm:Removing cluster... 2026-03-09T15:02:32.343 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd --force 2026-03-09T15:02:32.492 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 6884f6b8-1bc8-11f1-a1b7-432e3f447ddd 2026-03-09T15:02:32.597 INFO:tasks.cephadm:Removing cephadm ... 
2026-03-09T15:02:32.597 DEBUG:teuthology.orchestra.run.vm03:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-09T15:02:32.615 DEBUG:teuthology.orchestra.run.vm04:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-09T15:02:32.631 INFO:tasks.cephadm:Teardown complete 2026-03-09T15:02:32.631 DEBUG:teuthology.run_tasks:Unwinding manager cephadm.deploy_samba_ad_dc 2026-03-09T15:02:32.655 DEBUG:teuthology.orchestra.run.vm04:> sudo podman stop samba-ad 2026-03-09T15:02:32.900 INFO:teuthology.orchestra.run.vm04.stdout:samba-ad 2026-03-09T15:02:32.906 DEBUG:teuthology.orchestra.run.vm04:> sudo podman rm samba-ad 2026-03-09T15:02:32.961 INFO:teuthology.orchestra.run.vm04.stdout:samba-ad 2026-03-09T15:02:32.964 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -rf /var/lib/samba/container/logs /var/lib/samba/container/data 2026-03-09T15:02:32.987 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-09T15:02:32.990 INFO:teuthology.task.clock:Checking final clock skew... 2026-03-09T15:02:32.990 DEBUG:teuthology.orchestra.run.vm03:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T15:02:32.991 DEBUG:teuthology.orchestra.run.vm04:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T15:02:33.004 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found 2026-03-09T15:02:33.009 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T15:02:33.009 INFO:teuthology.orchestra.run.vm03.stdout:=============================================================================== 2026-03-09T15:02:33.009 INFO:teuthology.orchestra.run.vm03.stdout:^+ ntp5.kernfusion.at 2 6 377 33 -342us[ -342us] +/- 17ms 2026-03-09T15:02:33.009 INFO:teuthology.orchestra.run.vm03.stdout:^- 217.160.19.219 2 6 377 32 +2221us[+2221us] +/- 54ms 2026-03-09T15:02:33.009 INFO:teuthology.orchestra.run.vm03.stdout:^- hatkeininter.net 2 6 37 32 -512us[ -512us] +/- 48ms 2026-03-09T15:02:33.009 INFO:teuthology.orchestra.run.vm03.stdout:^* node-2.infogral.is 2 6 377 33 -18us[ +33us] +/- 16ms 2026-03-09T15:02:33.042 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found 2026-03-09T15:02:33.046 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T15:02:33.046 INFO:teuthology.orchestra.run.vm04.stdout:=============================================================================== 2026-03-09T15:02:33.046 INFO:teuthology.orchestra.run.vm04.stdout:^? spacys.de 2 7 40 298 -231us[ +479us] +/- 46ms 2026-03-09T15:02:33.046 INFO:teuthology.orchestra.run.vm04.stdout:^* node-2.infogral.is 2 6 375 33 +57us[ +97us] +/- 16ms 2026-03-09T15:02:33.046 INFO:teuthology.orchestra.run.vm04.stdout:^+ ntp5.kernfusion.at 2 6 377 33 -198us[ -198us] +/- 17ms 2026-03-09T15:02:33.046 INFO:teuthology.orchestra.run.vm04.stdout:^- 217.160.19.219 2 6 377 32 +2338us[+2338us] +/- 54ms 2026-03-09T15:02:33.046 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-03-09T15:02:33.050 INFO:teuthology.task.ansible:Skipping ansible cleanup... 
2026-03-09T15:02:33.050 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-03-09T15:02:33.053 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-03-09T15:02:33.057 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-03-09T15:02:33.059 INFO:teuthology.task.internal:Duration was 501.335510 seconds 2026-03-09T15:02:33.059 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-03-09T15:02:33.062 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-03-09T15:02:33.062 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-09T15:02:33.063 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-09T15:02:33.104 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T15:02:33.130 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T15:02:33.610 INFO:teuthology.task.internal.syslog:Checking logs for errors... 2026-03-09T15:02:33.610 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm03.local 2026-03-09T15:02:33.610 DEBUG:teuthology.orchestra.run.vm03:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-09T15:02:33.679 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local 2026-03-09T15:02:33.679 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-09T15:02:33.706 INFO:teuthology.task.internal.syslog:Gathering journactl... 
2026-03-09T15:02:33.706 DEBUG:teuthology.orchestra.run.vm03:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T15:02:33.722 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T15:02:34.370 INFO:teuthology.task.internal.syslog:Compressing syslogs... 2026-03-09T15:02:34.370 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T15:02:34.372 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T15:02:34.395 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T15:02:34.396 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T15:02:34.396 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-09T15:02:34.396 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T15:02:34.396 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-09T15:02:34.400 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T15:02:34.401 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T15:02:34.401 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-09T15:02:34.401 INFO:teuthology.orchestra.run.vm04.stderr: -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T15:02:34.402 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-09T15:02:34.565 INFO:teuthology.orchestra.run.vm03.stderr: 98.1% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-09T15:02:34.620 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-09T15:02:34.622 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-03-09T15:02:34.625 INFO:teuthology.task.internal:Restoring /etc/sudoers... 
2026-03-09T15:02:34.625 DEBUG:teuthology.orchestra.run.vm03:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-09T15:02:34.650 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-09T15:02:34.692 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump 2026-03-09T15:02:34.696 DEBUG:teuthology.orchestra.run.vm03:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-09T15:02:34.697 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-09T15:02:34.722 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = core 2026-03-09T15:02:34.758 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core 2026-03-09T15:02:34.771 DEBUG:teuthology.orchestra.run.vm03:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-09T15:02:34.790 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:02:34.791 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-09T15:02:34.826 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:02:34.827 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive 2026-03-09T15:02:34.829 INFO:teuthology.task.internal:Transferring archived files... 2026-03-09T15:02:34.830 DEBUG:teuthology.misc:Transferring archived files from vm03:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516/remote/vm03 2026-03-09T15:02:34.830 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-09T15:02:34.861 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/516/remote/vm04 2026-03-09T15:02:34.861 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-09T15:02:34.902 INFO:teuthology.task.internal:Removing archive directory... 2026-03-09T15:02:34.902 DEBUG:teuthology.orchestra.run.vm03:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-09T15:02:34.903 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-09T15:02:34.959 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload 2026-03-09T15:02:34.961 INFO:teuthology.task.internal:Not uploading archives. 2026-03-09T15:02:34.961 DEBUG:teuthology.run_tasks:Unwinding manager internal.base 2026-03-09T15:02:35.003 INFO:teuthology.task.internal:Tidying up after the test... 
2026-03-09T15:02:35.003 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-09T15:02:35.005 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-09T15:02:35.020 INFO:teuthology.orchestra.run.vm03.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 15:02 /home/ubuntu/cephtest 2026-03-09T15:02:35.024 INFO:teuthology.orchestra.run.vm04.stdout: 8532143 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 15:02 /home/ubuntu/cephtest 2026-03-09T15:02:35.026 DEBUG:teuthology.run_tasks:Unwinding manager console_log 2026-03-09T15:02:35.037 INFO:teuthology.run:Summary data: description: orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain} duration: 501.33551025390625 owner: kyr success: true 2026-03-09T15:02:35.037 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-09T15:02:35.059 INFO:teuthology.run:pass