2026-03-09T16:37:07.990 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T16:37:07.995 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T16:37:08.014 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555
branch: squid
description: orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_host_drain}
email: null
first_in_suite: false
flavor: default
job_id: '555'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 3
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: true
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - MON_DOWN
    - mons down
    - mon down
    - out of quorum
    - CEPHADM_STRAY_HOST
    - CEPHADM_STRAY_DAEMON
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - mon.a
  - mgr.a
  - osd.0
  - osd.1
- - host.b
  - mon.b
  - mgr.b
  - osd.2
  - osd.3
- - host.c
  - mon.c
  - osd.4
  - osd.5
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIM6b9bmpMBvYMFx0U4HKJgOVkksYUfMqYNvRGMWl7GAA3J/WEMOvtKhfphSfoxZSbmwDAPgRO9mz94S5bSWhow=
  vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKGhpkl0tnpImBVNjOvt0AY2drE9nsisHUC8YFm1MS9hfZxPugdFlWHd+BdAmQQ3Eu37UcU04KFJivtVrze2NHk=
  vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBD5P9fXyDJ3dm1ghlkkYqvi5cbkTWKtHQ9UxfUyeJkcK440kdnwIckoM0/+Xqt4JicaugRZphyBURZzwSg0zqzQ=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install runc nvmetcli nvme-cli -y
    - sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
    - sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
- install: null
- cephadm: null
- cephadm.shell:
    host.a:
    - |
      set -ex
      HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
      for host in $HOSTNAMES; do
        # find the hostname for "host.c" which will have no mgr
        HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
        if [ "$HAS_MGRS" == "false" ]; then
          HOST_C="${host}"
        fi
      done
      # One last thing to worry about before draining the host
      # is that the teuthology test tends to put the explicit
      # hostnames in the placement for the mon service.
      # We want to make sure we can drain without providing
      # --force and there is a check for the host being removed
      # being listed explicitly in the placements. Therefore,
      # we should remove it from the mon placement.
      ceph orch ls mon --export > mon.yaml
      sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
      ceph orch apply -i mon_adjusted.yaml
      # now drain that host
      ceph orch host drain $HOST_C --zap-osd-devices
      # wait for drain to complete
      HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
      while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
        sleep 15
        HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
      done
      # we want to check the ability to remove the host from
      # the CRUSH map, so we should first verify the host is in
      # the CRUSH map.
      ceph osd getcrushmap -o compiled-crushmap
      crushtool -d compiled-crushmap -o crushmap.txt
      CRUSH_MAP=$(cat crushmap.txt)
      if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
        printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
        exit 1
      fi
      # If the drain was successful, we should be able to remove the
      # host without force with no issues. If there are still daemons
      # we will get a response telling us to drain the host and a
      # non-zero return code
      ceph orch host rm $HOST_C --rm-crush-entry
      # verify we've successfully removed the host from the CRUSH map
      sleep 30
      ceph osd getcrushmap -o compiled-crushmap
      crushtool -d compiled-crushmap -o crushmap.txt
      CRUSH_MAP=$(cat crushmap.txt)
      if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
        printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
        exit 1
      fi
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
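[Note: the wait loop in the cephadm.shell script above compares `ceph orch ps` output against the literal string "No daemons reported", which is brittle if that message ever changes. A minimal sketch of a sturdier wait, assuming `ceph orch ps --hostname X --format json` yields an empty JSON array once no daemons remain on the host:]

    # poll until the orchestrator reports zero daemons on the drained host;
    # 'jq length' counts entries in the returned JSON array
    while [ "$(ceph orch ps --hostname "$HOST_C" --format json | jq 'length')" -gt 0 ]; do
      sleep 15
    done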
2026-03-09T16:37:08.014 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T16:37:08.015 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T16:37:08.015 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T16:37:08.015 INFO:teuthology.task.internal:Checking packages...
2026-03-09T16:37:08.015 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T16:37:08.015 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T16:37:08.015 INFO:teuthology.packaging:ref: None
2026-03-09T16:37:08.015 INFO:teuthology.packaging:tag: None
2026-03-09T16:37:08.015 INFO:teuthology.packaging:branch: squid
2026-03-09T16:37:08.015 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T16:37:08.016 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-09T16:37:08.848 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-09T16:37:08.849 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T16:37:08.850 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T16:37:08.850 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T16:37:08.850 INFO:teuthology.task.internal:Saving configuration
2026-03-09T16:37:08.855 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T16:37:08.856 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-09T16:37:08.862 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 16:35:34.930014', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIM6b9bmpMBvYMFx0U4HKJgOVkksYUfMqYNvRGMWl7GAA3J/WEMOvtKhfphSfoxZSbmwDAPgRO9mz94S5bSWhow='}
2026-03-09T16:37:08.867 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 16:35:34.930470', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKGhpkl0tnpImBVNjOvt0AY2drE9nsisHUC8YFm1MS9hfZxPugdFlWHd+BdAmQQ3Eu37UcU04KFJivtVrze2NHk='}
2026-03-09T16:37:08.871 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm05.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 16:35:34.930719', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:05', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBD5P9fXyDJ3dm1ghlkkYqvi5cbkTWKtHQ9UxfUyeJkcK440kdnwIckoM0/+Xqt4JicaugRZphyBURZzwSg0zqzQ='}
2026-03-09T16:37:08.871 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-09T16:37:08.872 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['host.a', 'mon.a', 'mgr.a', 'osd.0', 'osd.1']
2026-03-09T16:37:08.872 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['host.b', 'mon.b', 'mgr.b', 'osd.2', 'osd.3']
2026-03-09T16:37:08.872 INFO:teuthology.task.internal:roles: ubuntu@vm05.local - ['host.c', 'mon.c', 'osd.4', 'osd.5']
2026-03-09T16:37:08.872 INFO:teuthology.run_tasks:Running task console_log...
2026-03-09T16:37:08.877 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding
2026-03-09T16:37:08.882 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding
2026-03-09T16:37:08.887 DEBUG:teuthology.task.console_log:vm05 does not support IPMI; excluding
2026-03-09T16:37:08.887 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fb093dd3e20>, signals=[15])
2026-03-09T16:37:08.887 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-09T16:37:08.888 INFO:teuthology.task.internal:Opening connections...
2026-03-09T16:37:08.888 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local
2026-03-09T16:37:08.888 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T16:37:08.947 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local
2026-03-09T16:37:08.947 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T16:37:09.004 DEBUG:teuthology.task.internal:connecting to ubuntu@vm05.local
2026-03-09T16:37:09.004 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60}
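[Note: the internal.check_packages step above resolves builds via the Shaman API, and the exact query URL is logged. One can repeat the same lookup by hand; this is a sketch, and the jq field names (.sha1, .status) are assumptions about the response schema rather than something confirmed by this log:]

    # repeat the logged Shaman query and show matched builds
    curl -s 'https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid' \
      | jq '.[] | {sha1, status}'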
2026-03-09T16:37:09.063 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-09T16:37:09.064 DEBUG:teuthology.orchestra.run.vm01:> uname -m
2026-03-09T16:37:09.083 INFO:teuthology.orchestra.run.vm01.stdout:x86_64
2026-03-09T16:37:09.083 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release
2026-03-09T16:37:09.139 INFO:teuthology.orchestra.run.vm01.stdout:NAME="CentOS Stream"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="9"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:ID="centos"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE="rhel fedora"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="9"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:PLATFORM_ID="platform:el9"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:ANSI_COLOR="0;31"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:LOGO="fedora-logo-icon"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://centos.org/"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T16:37:09.140 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T16:37:09.140 INFO:teuthology.lock.ops:Updating vm01.local on lock server
2026-03-09T16:37:09.145 DEBUG:teuthology.orchestra.run.vm04:> uname -m
2026-03-09T16:37:09.160 INFO:teuthology.orchestra.run.vm04.stdout:x86_64
2026-03-09T16:37:09.161 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release
2026-03-09T16:37:09.215 INFO:teuthology.orchestra.run.vm04.stdout:NAME="CentOS Stream"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="9"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:ID="centos"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE="rhel fedora"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="9"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:PLATFORM_ID="platform:el9"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:ANSI_COLOR="0;31"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:LOGO="fedora-logo-icon"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://centos.org/"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T16:37:09.216 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T16:37:09.216 INFO:teuthology.lock.ops:Updating vm04.local on lock server
2026-03-09T16:37:09.221 DEBUG:teuthology.orchestra.run.vm05:> uname -m
2026-03-09T16:37:09.235 INFO:teuthology.orchestra.run.vm05.stdout:x86_64
2026-03-09T16:37:09.235 DEBUG:teuthology.orchestra.run.vm05:> cat /etc/os-release
2026-03-09T16:37:09.289 INFO:teuthology.orchestra.run.vm05.stdout:NAME="CentOS Stream"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:VERSION="9"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:ID="centos"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:ID_LIKE="rhel fedora"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:VERSION_ID="9"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:PLATFORM_ID="platform:el9"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:ANSI_COLOR="0;31"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:LOGO="fedora-logo-icon"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:HOME_URL="https://centos.org/"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T16:37:09.290 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T16:37:09.290 INFO:teuthology.lock.ops:Updating vm05.local on lock server
2026-03-09T16:37:09.294 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-09T16:37:09.296 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-09T16:37:09.297 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-09T16:37:09.297 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest
2026-03-09T16:37:09.299 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest
2026-03-09T16:37:09.300 DEBUG:teuthology.orchestra.run.vm05:> test '!' -e /home/ubuntu/cephtest
2026-03-09T16:37:09.344 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-09T16:37:09.345 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-09T16:37:09.345 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph)
2026-03-09T16:37:09.356 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph)
2026-03-09T16:37:09.358 DEBUG:teuthology.orchestra.run.vm05:> test -z $(ls -A /var/lib/ceph)
2026-03-09T16:37:09.370 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T16:37:09.371 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T16:37:09.399 INFO:teuthology.orchestra.run.vm05.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T16:37:09.399 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-09T16:37:09.407 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready
2026-03-09T16:37:09.428 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:09.617 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready
2026-03-09T16:37:09.633 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:09.827 DEBUG:teuthology.orchestra.run.vm05:> test -e /ceph-qa-ready
2026-03-09T16:37:09.841 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:10.024 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-09T16:37:10.025 INFO:teuthology.task.internal:Creating test directory...
2026-03-09T16:37:10.025 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T16:37:10.027 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T16:37:10.029 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T16:37:10.043 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-09T16:37:10.045 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-09T16:37:10.046 INFO:teuthology.task.internal:Creating archive directory...
2026-03-09T16:37:10.046 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T16:37:10.086 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T16:37:10.087 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T16:37:10.105 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-09T16:37:10.106 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-09T16:37:10.106 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T16:37:10.158 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:10.159 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T16:37:10.172 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:10.173 DEBUG:teuthology.orchestra.run.vm05:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T16:37:10.188 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:10.188 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T16:37:10.201 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T16:37:10.215 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T16:37:10.226 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:37:10.236 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:37:10.241 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:37:10.251 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:37:10.255 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:37:10.265 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
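[Note: the internal.coredump task above points kernel.core_pattern at the archive directory. If cores are expected but missing, one can confirm the pattern took effect and force a throwaway dump with standard tooling; a sketch, assuming the shell's core limit can be raised:]

    # confirm the active pattern
    sysctl kernel.core_pattern
    # generate a test core to prove the path is writable
    ulimit -c unlimited
    sleep 60 &
    kill -SEGV $!
    ls /home/ubuntu/cephtest/archive/coredump/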
2026-03-09T16:37:10.267 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-09T16:37:10.268 INFO:teuthology.task.internal:Configuring sudo...
2026-03-09T16:37:10.268 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T16:37:10.280 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T16:37:10.295 DEBUG:teuthology.orchestra.run.vm05:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T16:37:10.331 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-09T16:37:10.334 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-09T16:37:10.334 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T16:37:10.347 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T16:37:10.363 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T16:37:10.386 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:37:10.428 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:37:10.483 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-09T16:37:10.483 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T16:37:10.546 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:37:10.569 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:37:10.626 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-03-09T16:37:10.626 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T16:37:10.685 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:37:10.706 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:37:10.763 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T16:37:10.763 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T16:37:10.821 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart
2026-03-09T16:37:10.824 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart
2026-03-09T16:37:10.826 DEBUG:teuthology.orchestra.run.vm05:> sudo service rsyslog restart
2026-03-09T16:37:10.854 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T16:37:10.858 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T16:37:10.892 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T16:37:11.351 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-09T16:37:11.353 INFO:teuthology.task.internal:Starting timer...
2026-03-09T16:37:11.353 INFO:teuthology.run_tasks:Running task pcp...
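[Note: the /etc/sudoers edit above disables requiretty and re-enables visiblepw by rewriting matching Defaults lines in place, keeping a .orig.teuthology backup. The effect of the first expression can be checked offline:]

    # the sed only negates an uncommented requiretty default
    echo 'Defaults    requiretty' \
      | sed -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g'
    # -> Defaults    !requiretty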
2026-03-09T16:37:11.355 INFO:teuthology.run_tasks:Running task selinux...
2026-03-09T16:37:11.358 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0']}
2026-03-09T16:37:11.358 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported
2026-03-09T16:37:11.358 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported
2026-03-09T16:37:11.358 INFO:teuthology.task.selinux:Excluding vm05: VMs are not yet supported
2026-03-09T16:37:11.358 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-09T16:37:11.358 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-09T16:37:11.358 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-09T16:37:11.358 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-09T16:37:11.360 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-09T16:37:11.360 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-09T16:37:11.361 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-09T16:37:11.968 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-09T16:37:11.973 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-09T16:37:11.974 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory6gm5ef3b --limit vm01.local,vm04.local,vm05.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-09T16:38:10.713 INFO:teuthology.task.ansible:Archiving ansible failure log at: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555/ansible_failures.yaml
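[Note: the failing ansible-playbook invocation is logged in full above, but the /tmp inventory file is temporary. To reproduce the run by hand one can rebuild a minimal inventory; this is a hypothetical manual re-run, where the `testnodes` group name is taken from the playbook's hosts patterns shown above and everything else mirrors the logged command:]

    cat > /tmp/teuth_inventory <<'EOF'
    [testnodes]
    vm01.local
    vm04.local
    vm05.local
    EOF
    ansible-playbook -v \
      --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' \
      -i /tmp/teuth_inventory --limit vm01.local,vm04.local,vm05.local \
      /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml \
      --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs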
2026-03-09T16:38:10.713 ERROR:teuthology.run_tasks:Saw exception from tasks.
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/run_tasks.py", line 112, in run_tasks
    manager.__enter__()
  File "/home/teuthos/teuthology/teuthology/task/__init__.py", line 123, in __enter__
    self.begin()
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 490, in begin
    super(CephLab, self).begin()
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 324, in begin
    self.execute_playbook()
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 356, in execute_playbook
    self._handle_failure(command, status)
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 384, in _handle_failure
    raise AnsibleFailedError(failures)
teuthology.exceptions.AnsibleFailedError: vm04.local:
  _ansible_no_log: false
  changed: false
  invocation:
    module_args:
      allow_downgrade: false
      allowerasing: false
      autoremove: false
      best: null
      bugfix: false
      cacheonly: false
      conf_file: null
      disable_excludes: null
      disable_gpg_check: false
      disable_plugin: []
      disablerepo: []
      download_dir: null
      download_only: false
      enable_plugin: []
      enablerepo: []
      exclude: []
      install_repoquery: true
      install_weak_deps: true
      installroot: /
      list: null
      lock_timeout: 30
      name:
      - ceph
      - ceph-base
      - ceph-selinux
      - ceph-common
      - ceph-debuginfo
      - ceph-release
      - libcephfs1
      - ceph-radosgw
      - python-ceph
      - python-rados
      - python-rbd
      - python-cephfs
      - librbd1
      - librados2
      - mod_fastcgi
      nobest: null
      releasever: null
      security: false
      skip_broken: false
      sslverify: true
      state: absent
      update_cache: false
      update_only: false
      use_backend: auto
      validate_certs: true
  msg: 'Failed to download metadata for repo ''baseos'': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried'
  rc: 1
  results: []
vm05.local:
  _ansible_no_log: false
  changed: false
  invocation:
    module_args:
      allow_downgrade: false
      allowerasing: false
      autoremove: false
      best: null
      bugfix: false
      cacheonly: false
      conf_file: null
      disable_excludes: null
      disable_gpg_check: false
      disable_plugin: []
      disablerepo: []
      download_dir: null
      download_only: false
      enable_plugin: []
      enablerepo: []
      exclude: []
      install_repoquery: true
      install_weak_deps: true
      installroot: /
      list: null
      lock_timeout: 30
      name:
      - ceph
      - ceph-base
      - ceph-selinux
      - ceph-common
      - ceph-debuginfo
      - ceph-release
      - libcephfs1
      - ceph-radosgw
      - python-ceph
      - python-rados
      - python-rbd
      - python-cephfs
      - librbd1
      - librados2
      - mod_fastcgi
      nobest: null
      releasever: null
      security: false
      skip_broken: false
      sslverify: true
      state: absent
      update_cache: false
      update_only: false
      use_backend: auto
      validate_certs: true
  msg: 'Failed to download metadata for repo ''baseos'': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried'
  rc: 1
  results: []
vm01.local:
  _ansible_no_log: false
  changed: false
  invocation:
    module_args:
      allow_downgrade: false
      allowerasing: false
      autoremove: false
      best: null
      bugfix: false
      cacheonly: false
      conf_file: null
      disable_excludes: null
      disable_gpg_check: false
      disable_plugin: []
      disablerepo: []
      download_dir: null
      download_only: false
      enable_plugin: []
      enablerepo: []
      exclude: []
      install_repoquery: true
      install_weak_deps: true
      installroot: /
      list: null
      lock_timeout: 30
      name:
      - ceph
      - ceph-base
      - ceph-selinux
      - ceph-common
      - ceph-debuginfo
      - ceph-release
      - libcephfs1
      - ceph-radosgw
      - python-ceph
      - python-rados
      - python-rbd
      - python-cephfs
      - librbd1
      - librados2
      - mod_fastcgi
      nobest: null
      releasever: null
      security: false
      skip_broken: false
      sslverify: true
      state: absent
      update_cache: false
      update_only: false
      use_backend: auto
      validate_certs: true
  msg: 'Failed to download metadata for repo ''baseos'': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried'
  rc: 1
  results: []
2026-03-09T16:38:10.713 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-09T16:38:10.715 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-09T16:38:10.715 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-09T16:38:10.717 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-09T16:38:10.718 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-09T16:38:10.720 INFO:teuthology.task.internal:Duration was 59.366984 seconds
2026-03-09T16:38:10.720 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-09T16:38:10.722 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-09T16:38:10.722 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T16:38:10.724 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T16:38:10.725 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T16:38:10.778 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T16:38:10.779 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T16:38:10.782 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T16:38:21.634 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-09T16:38:21.634 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local
2026-03-09T16:38:21.634 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T16:38:21.671 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local
2026-03-09T16:38:21.671 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T16:38:21.720 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm05.local
2026-03-09T16:38:21.720 DEBUG:teuthology.orchestra.run.vm05:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T16:38:21.764 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-03-09T16:38:21.764 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:38:21.766 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:38:21.768 DEBUG:teuthology.orchestra.run.vm05:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:38:21.915 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-09T16:38:21.915 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T16:38:21.917 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T16:38:21.919 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T16:38:21.947 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:38:21.947 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:38:21.948 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T16:38:21.948 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:38:21.949 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T16:38:21.950 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:38:21.951 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:38:21.952 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:38:21.952 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T16:38:21.952 INFO:teuthology.orchestra.run.vm05.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T16:38:21.953 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:38:21.954 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:38:21.954 INFO:teuthology.orchestra.run.vm01.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T16:38:21.955 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:38:21.955 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T16:38:21.981 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T16:38:21.987 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.1% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T16:38:21.988 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T16:38:21.992 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-09T16:38:21.994 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-09T16:38:21.994 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T16:38:22.062 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T16:38:22.098 DEBUG:teuthology.orchestra.run.vm05:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T16:38:22.135 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-09T16:38:22.137 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:38:22.139 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:38:22.141 DEBUG:teuthology.orchestra.run.vm05:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:38:22.172 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core
2026-03-09T16:38:22.179 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core
2026-03-09T16:38:22.211 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = core
2026-03-09T16:38:22.226 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:38:22.259 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:38:22.259 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:38:22.282 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:38:22.282 DEBUG:teuthology.orchestra.run.vm05:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:38:22.305 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:38:22.305 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
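[Note: the archive transfers below stream a tar of each remote's archive directory back into the job archive. The hand-run equivalent, assuming a plain SSH pipe (teuthology drives this over its own connection, so this is only an approximation):]

    # pull one node's archive the same way: tar on the remote, untar locally
    mkdir -p ./remote/vm01
    ssh ubuntu@vm01.local 'sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .' \
      | tar x -C ./remote/vm01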
2026-03-09T16:38:22.307 INFO:teuthology.task.internal:Transferring archived files...
2026-03-09T16:38:22.307 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555/remote/vm01
2026-03-09T16:38:22.307 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T16:38:22.350 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555/remote/vm04
2026-03-09T16:38:22.350 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T16:38:22.389 DEBUG:teuthology.misc:Transferring archived files from vm05:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/555/remote/vm05
2026-03-09T16:38:22.389 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T16:38:22.421 INFO:teuthology.task.internal:Removing archive directory...
2026-03-09T16:38:22.421 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T16:38:22.426 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T16:38:22.433 DEBUG:teuthology.orchestra.run.vm05:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T16:38:22.486 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-09T16:38:22.489 INFO:teuthology.task.internal:Not uploading archives.
2026-03-09T16:38:22.489 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-09T16:38:22.491 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-09T16:38:22.491 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T16:38:22.493 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T16:38:22.494 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T16:38:22.515 INFO:teuthology.orchestra.run.vm01.stdout: 71303424    0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  9 16:38 /home/ubuntu/cephtest
2026-03-09T16:38:22.519 INFO:teuthology.orchestra.run.vm04.stdout: 71303424    0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  9 16:38 /home/ubuntu/cephtest
2026-03-09T16:38:22.555 INFO:teuthology.orchestra.run.vm05.stdout: 71303424    0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  9 16:38 /home/ubuntu/cephtest
2026-03-09T16:38:22.556 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-09T16:38:22.564 INFO:teuthology.run:Summary data:
description: orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_host_drain}
duration: 59.366984128952026
failure_reason: 'vm04.local: _ansible_no_log: false changed: false invocation: module_args: allow_downgrade: false allowerasing: false autoremove: false best: null bugfix: false cacheonly: false conf_file: null disable_excludes: null disable_gpg_check: false disable_plugin: [] disablerepo: [] download_dir: null download_only: false enable_plugin: [] enablerepo: [] exclude: [] install_repoquery: true install_weak_deps: true installroot: / list: null lock_timeout: 30 name: - ceph - ceph-base - ceph-selinux - ceph-common - ceph-debuginfo - ceph-release - libcephfs1 - ceph-radosgw - python-ceph - python-rados - python-rbd - python-cephfs - librbd1 - librados2 - mod_fastcgi nobest: null releasever: null security: false skip_broken: false sslverify: true state: absent update_cache: false update_only: false use_backend: auto validate_certs: true msg: ''Failed to download metadata for repo ''''baseos'''': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried'' rc: 1 results: []vm05.local: _ansible_no_log: false changed: false invocation: module_args: allow_downgrade: false allowerasing: false autoremove: false best: null bugfix: false cacheonly: false conf_file: null disable_excludes: null disable_gpg_check: false disable_plugin: [] disablerepo: [] download_dir: null download_only: false enable_plugin: [] enablerepo: [] exclude: [] install_repoquery: true install_weak_deps: true installroot: / list: null lock_timeout: 30 name: - ceph - ceph-base - ceph-selinux - ceph-common - ceph-debuginfo - ceph-release - libcephfs1 - ceph-radosgw - python-ceph - python-rados - python-rbd - python-cephfs - librbd1 - librados2 - mod_fastcgi nobest: null releasever: null security: false skip_broken: false sslverify: true state: absent update_cache: false update_only: false use_backend: auto validate_certs: true msg: ''Failed to download metadata for repo ''''baseos'''': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried'' rc: 1 results: []vm01.local: _ansible_no_log: false changed: false invocation: module_args: allow_downgrade: false allowerasing: false autoremove: false best: null bugfix: false cacheonly: false conf_file: null disable_excludes: null disable_gpg_check: false disable_plugin: [] disablerepo: [] download_dir: null download_only: false enable_plugin: [] enablerepo: [] exclude: [] install_repoquery: true install_weak_deps: true installroot: / list: null lock_timeout: 30 name: - ceph - ceph-base - ceph-selinux - ceph-common - ceph-debuginfo - ceph-release - libcephfs1 - ceph-radosgw - python-ceph - python-rados - python-rbd - python-cephfs - librbd1 - librados2 - mod_fastcgi nobest: null releasever: null security: false skip_broken: false sslverify: true state: absent update_cache: false update_only: false use_backend: auto validate_certs: true msg: ''Failed to download metadata for repo ''''baseos'''': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried'' rc: 1 results: []'
owner: kyr
sentry_event: null
status: dead
success: false
2026-03-09T16:38:22.565 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T16:38:22.587 INFO:teuthology.run:DEAD
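[Note: the run died before any Ceph task started. On all three nodes dnf could not fetch metadata for the baseos repo ("All mirrors were tried"), so the ansible package cleanup failed. A first diagnostic pass on a failing node, independent of teuthology, could look like this:]

    # drop cached metadata and retry verbosely, then inspect how baseos is configured
    sudo dnf clean all
    sudo dnf -v makecache
    sudo dnf repoinfo baseos
    ls /etc/yum.repos.d/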