2026-03-09T16:35:34.599 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T16:35:34.604 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T16:35:34.623 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/554
branch: squid
description: orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '554'
ktype: distro
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: ubuntu
os_version: '22.04'
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
      extra_system_packages:
        deb:
        - python3-xmltodict
        - python3-jmespath
        rpm:
        - bzip2
        - perl-Test-Harness
        - python3-xmltodict
        - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKKNESTDl4RnaxdOl6nZXVAvhDd1LZudluneAdupXXIlDFTEo3pCIQz1ZzdQGUTzoTxBorPgokPpqIX1Vaz17Ec=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE7243rD6w2PQdOLSzUgNmFDIcEboohEMFJvqAedLwcNUgJCs0RwqeLNkMxfgi0xrNAjA4I1zvndfwX6ZdSY0u4=
tasks:
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- cephadm.apply:
    specs:
    - placement:
        count: 4
        host_pattern: '*'
      service_id: foo
      service_type: rgw
      spec:
        rgw_frontend_port: 8000
    - placement:
        count: 2
      service_id: rgw.foo
      service_type: ingress
      spec:
        backend_service: rgw.foo
        frontend_port: 9000
        monitor_port: 9001
        virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}'
- cephadm.wait_for_service:
    service: rgw.foo
- cephadm.wait_for_service:
    service: ingress.rgw.foo
- cephadm.shell:
    host.a:
    - |
      echo "Check while healthy..."
      curl http://{{VIP0}}:9000/

      # stop each rgw in turn
      echo "Check with each rgw stopped in turn..."
      for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
      done

      # stop each haproxy in turn
      echo "Check with each haproxy down in turn..."
      for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
      done

      timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-09T16:35:34.623 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T16:35:34.623 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T16:35:34.623 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T16:35:34.624 INFO:teuthology.task.internal:Checking packages...
2026-03-09T16:35:34.624 INFO:teuthology.task.internal:Checking packages for os_type 'ubuntu', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T16:35:34.624 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T16:35:34.624 INFO:teuthology.packaging:ref: None
2026-03-09T16:35:34.624 INFO:teuthology.packaging:tag: None
2026-03-09T16:35:34.624 INFO:teuthology.packaging:branch: squid
2026-03-09T16:35:34.624 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T16:35:34.624 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&ref=squid
2026-03-09T16:35:35.253 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678-ge911bdeb-1jammy
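The rgw-ingress test script in the config above is built around one pattern: poll a `ceph orch ps` or `curl` probe until it succeeds or a 300-second timeout expires. A minimal Python sketch of that poll-until-timeout loop, assuming nothing beyond the standard library (the helper name and interface are illustrative, not teuthology's implementation):

    import subprocess
    import time

    def wait_for(cmd, timeout=300, interval=5):
        """Re-run `cmd` (a shell string) until it exits 0 or `timeout` elapses."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if subprocess.run(cmd, shell=True).returncode == 0:
                return True
            time.sleep(interval)
        raise TimeoutError(f'{cmd!r} did not succeed within {timeout}s')

    # Mirrors `timeout 300 bash -c "while ! curl http://$VIP:9000/ ..."` above;
    # the VIP here is a placeholder for the templated {{VIP0}} value.
    # wait_for('curl http://192.0.2.1:9000/', timeout=300, interval=1)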
2026-03-09T16:35:35.254 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T16:35:35.255 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T16:35:35.255 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T16:35:35.255 INFO:teuthology.task.internal:Saving configuration
2026-03-09T16:35:35.260 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T16:35:35.261 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-09T16:35:35.268 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/554', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 16:34:31.521921', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKKNESTDl4RnaxdOl6nZXVAvhDd1LZudluneAdupXXIlDFTEo3pCIQz1ZzdQGUTzoTxBorPgokPpqIX1Vaz17Ec='}
2026-03-09T16:35:35.272 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/554', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 16:34:31.522313', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE7243rD6w2PQdOLSzUgNmFDIcEboohEMFJvqAedLwcNUgJCs0RwqeLNkMxfgi0xrNAjA4I1zvndfwX6ZdSY0u4='}
2026-03-09T16:35:35.272 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-09T16:35:35.273 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.a', 'client.0']
2026-03-09T16:35:35.273 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'client.1']
2026-03-09T16:35:35.273 INFO:teuthology.run_tasks:Running task console_log...
2026-03-09T16:35:35.279 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-03-09T16:35:35.283 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding
2026-03-09T16:35:35.283 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fed983ab880>, signals=[15])
2026-03-09T16:35:35.283 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-09T16:35:35.284 INFO:teuthology.task.internal:Opening connections...
2026-03-09T16:35:35.284 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-03-09T16:35:35.284 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T16:35:35.341 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local
2026-03-09T16:35:35.342 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
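check_lock proceeds only when every target's status from the lock server looks like the dicts logged above: up, locked, and locked by the job owner. A hedged sketch of that predicate (the field names come straight from the logged dicts; the function itself is illustrative, not teuthology's code):

    def lock_ok(status: dict, owner: str) -> bool:
        """True if a machine-status dict shows a usable, owner-held lock."""
        return bool(
            status.get('up')
            and status.get('locked')
            and status.get('locked_by') == owner
        )

    # e.g. lock_ok(machine_status, 'kyr') for each of vm06.local and vm09.local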
2026-03-09T16:35:35.403 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-09T16:35:35.404 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-03-09T16:35:35.421 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-03-09T16:35:35.421 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:NAME="Ubuntu"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="22.04"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_CODENAME=jammy
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:ID=ubuntu
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE=debian
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://www.ubuntu.com/"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-03-09T16:35:35.465 INFO:teuthology.orchestra.run.vm06.stdout:UBUNTU_CODENAME=jammy
2026-03-09T16:35:35.465 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-03-09T16:35:35.469 DEBUG:teuthology.orchestra.run.vm09:> uname -m
2026-03-09T16:35:35.472 INFO:teuthology.orchestra.run.vm09.stdout:x86_64
2026-03-09T16:35:35.472 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:NAME="Ubuntu"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="22.04"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_CODENAME=jammy
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:ID=ubuntu
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE=debian
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://www.ubuntu.com/"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-03-09T16:35:35.516 INFO:teuthology.orchestra.run.vm09.stdout:UBUNTU_CODENAME=jammy
2026-03-09T16:35:35.516 INFO:teuthology.lock.ops:Updating vm09.local on lock server
2026-03-09T16:35:35.521 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-09T16:35:35.522 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-09T16:35:35.523 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-09T16:35:35.523 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-03-09T16:35:35.524 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest
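push_inventory derives each node's inventory from `uname -m` and `/etc/os-release`, as shown above. os-release is just KEY=value lines with optional shell-style quoting; a small self-contained sketch of a parser (not teuthology's own):

    import shlex

    def parse_os_release(text: str) -> dict:
        """Parse /etc/os-release content into a dict (handles quoted values)."""
        info = {}
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, _, value = line.partition('=')
            info[key] = shlex.split(value)[0] if value else ''
        return info

    # parse_os_release(open('/etc/os-release').read())['VERSION_CODENAME'] -> 'jammy'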
2026-03-09T16:35:35.559 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-09T16:35:35.560 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-09T16:35:35.560 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-03-09T16:35:35.571 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph)
2026-03-09T16:35:35.573 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T16:35:35.603 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T16:35:35.604 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-09T16:35:35.611 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-03-09T16:35:35.616 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:35:35.838 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready
2026-03-09T16:35:35.841 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:35:36.062 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-09T16:35:36.063 INFO:teuthology.task.internal:Creating test directory...
2026-03-09T16:35:36.063 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T16:35:36.065 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T16:35:36.067 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-09T16:35:36.068 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-09T16:35:36.069 INFO:teuthology.task.internal:Creating archive directory...
2026-03-09T16:35:36.069 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T16:35:36.111 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive
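Note the quoting subtlety in the check above: `test -z $(ls -A /var/lib/ceph)` passes both when the directory is empty and when it does not exist, because the `ls` error goes to stderr and the unquoted substitution collapses to no arguments, which is exactly what the stderr lines show happening here. A Python sketch of the equivalent predicate (illustrative, not teuthology's code):

    import os

    def ceph_data_clean(path='/var/lib/ceph') -> bool:
        """True if `path` is absent or an empty directory, mirroring
        `test -z $(ls -A /var/lib/ceph)` as run above."""
        return not os.path.isdir(path) or not os.listdir(path)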
2026-03-09T16:35:36.115 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-09T16:35:36.116 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-09T16:35:36.116 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T16:35:36.156 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:35:36.156 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T16:35:36.159 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:35:36.159 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T16:35:36.199 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T16:35:36.205 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:35:36.208 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:35:36.210 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:35:36.213 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T16:35:36.214 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-09T16:35:36.215 INFO:teuthology.task.internal:Configuring sudo...
2026-03-09T16:35:36.215 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T16:35:36.255 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T16:35:36.264 INFO:teuthology.run_tasks:Running task internal.syslog...
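The coredump task above points kernel.core_pattern at the per-job archive directory, using %t (seconds since epoch at dump time) and %p (PID of the dumping process) so each core file gets a unique name. A small sketch that builds the same pattern and verifies the kernel accepted it (paths follow the log above; the helpers are illustrative):

    from pathlib import Path

    def expected_core_pattern(archive='/home/ubuntu/cephtest/archive'):
        # %t = dump time in seconds since epoch, %p = PID of the crashing process
        return f'{archive}/coredump/%t.%p.core'

    def core_pattern_applied(archive='/home/ubuntu/cephtest/archive') -> bool:
        """Check the live kernel setting against the expected template."""
        current = Path('/proc/sys/kernel/core_pattern').read_text().strip()
        return current == expected_core_pattern(archive)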
2026-03-09T16:35:36.266 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-09T16:35:36.266 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T16:35:36.302 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T16:35:36.307 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:35:36.348 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:35:36.392 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-09T16:35:36.393 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T16:35:36.441 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:35:36.444 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:35:36.491 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-09T16:35:36.491 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T16:35:36.540 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-03-09T16:35:36.541 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart
2026-03-09T16:35:36.596 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-09T16:35:36.598 INFO:teuthology.task.internal:Starting timer...
2026-03-09T16:35:36.598 INFO:teuthology.run_tasks:Running task pcp...
2026-03-09T16:35:36.600 INFO:teuthology.run_tasks:Running task selinux...
2026-03-09T16:35:36.602 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-03-09T16:35:36.602 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported
2026-03-09T16:35:36.602 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-09T16:35:36.602 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-09T16:35:36.602 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
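The syslog task pre-creates world-writable kern.log and misc.log in the archive, then installs /etc/rsyslog.d/80-cephtest.conf via `sudo dd` (with the config on stdin, which is why the log shows no file content) and restarts rsyslog. The drop-in's rules are not visible in this log; a hypothetical sketch of the write-and-restart step, with the CONF rules being an assumption rather than teuthology's actual template:

    import subprocess

    # Assumed rules: tee kernel messages and everything else into the archive.
    CONF = (
        'kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log\n'
        '*.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log\n'
    )

    def install_drop_in(conf=CONF, path='/etc/rsyslog.d/80-cephtest.conf'):
        # Mirrors `sudo dd of=...` with the config piped in, then a restart.
        subprocess.run(['sudo', 'dd', f'of={path}'], input=conf.encode(), check=True)
        subprocess.run(['sudo', 'service', 'rsyslog', 'restart'], check=True)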
2026-03-09T16:35:36.602 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-09T16:35:36.604 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-09T16:35:36.604 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-09T16:35:36.605 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-09T16:35:37.250 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-09T16:35:37.256 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-09T16:35:37.256 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory5r327vj1 --limit vm06.local,vm09.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-09T16:37:43.242 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm06.local'), Remote(name='ubuntu@vm09.local')]
2026-03-09T16:37:43.242 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-03-09T16:37:43.242 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T16:37:43.300 DEBUG:teuthology.orchestra.run.vm06:> true
2026-03-09T16:37:43.524 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-03-09T16:37:43.525 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local'
2026-03-09T16:37:43.525 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T16:37:43.589 DEBUG:teuthology.orchestra.run.vm09:> true
2026-03-09T16:37:43.805 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local'
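The full ansible-playbook invocation is logged above; rebuilt as an argv list it is easier to see which parts are job-specific (inventory file, host limit, skip_tags, extra vars). A sketch of composing that command in Python, with json.dumps producing exactly the --extra-vars payload shown in the log (the surrounding function is illustrative):

    import json

    def cephlab_cmd(inventory, hosts, playbook, skip_tags, extra_vars):
        """Compose the ansible-playbook argv used by the ansible.cephlab task."""
        return [
            'ansible-playbook', '-v',
            '--extra-vars', json.dumps(extra_vars),
            '-i', inventory,
            '--limit', ','.join(hosts),
            playbook,
            '--skip-tags', skip_tags,
        ]

    cephlab_cmd(
        '/tmp/teuth_ansible_inventory5r327vj1',
        ['vm06.local', 'vm09.local'],
        '/home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml',
        'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs',
        {'ansible_ssh_user': 'ubuntu', 'timezone': 'UTC'},
    )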
2026-03-09T16:37:43.805 INFO:teuthology.run_tasks:Running task clock...
2026-03-09T16:37:43.807 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-09T16:37:43.808 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T16:37:43.808 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T16:37:43.809 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T16:37:43.809 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T16:37:43.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-03-09T16:37:43.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Command line: ntpd -gq
2026-03-09T16:37:43.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: ----------------------------------------------------
2026-03-09T16:37:43.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: ntp-4 is maintained by Network Time Foundation,
2026-03-09T16:37:43.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: corporation. Support and training for ntp-4 are
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: available at https://www.nwtime.org/support
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: ----------------------------------------------------
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: proto: precision = 0.030 usec (-25)
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: basedate set to 2022-02-04
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: gps base set to 2022-02-06 (week 2196)
2026-03-09T16:37:43.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-03-09T16:37:43.826 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-03-09T16:37:43.826 INFO:teuthology.orchestra.run.vm06.stderr: 9 Mar 16:37:43 ntpd[16098]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 72 days ago
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listen and drop on 0 v6wildcard [::]:123
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listen normally on 2 lo 127.0.0.1:123
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listen normally on 3 ens3 192.168.123.106:123
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listen normally on 4 lo [::1]:123
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:6%2]:123
2026-03-09T16:37:43.827 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:43 ntpd[16098]: Listening on routing socket on fd #22 for interface updates
2026-03-09T16:37:43.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-03-09T16:37:43.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Command line: ntpd -gq
2026-03-09T16:37:43.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: ----------------------------------------------------
2026-03-09T16:37:43.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: ntp-4 is maintained by Network Time Foundation,
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: corporation. Support and training for ntp-4 are
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: available at https://www.nwtime.org/support
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: ----------------------------------------------------
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: proto: precision = 0.029 usec (-25)
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: basedate set to 2022-02-04
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: gps base set to 2022-02-06 (week 2196)
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-03-09T16:37:43.867 INFO:teuthology.orchestra.run.vm09.stderr: 9 Mar 16:37:43 ntpd[16070]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 72 days ago
2026-03-09T16:37:43.868 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listen and drop on 0 v6wildcard [::]:123
2026-03-09T16:37:43.868 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-03-09T16:37:43.868 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listen normally on 2 lo 127.0.0.1:123
2026-03-09T16:37:43.868 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listen normally on 3 ens3 192.168.123.109:123
2026-03-09T16:37:43.869 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listen normally on 4 lo [::1]:123
2026-03-09T16:37:43.869 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:9%2]:123
2026-03-09T16:37:43.869 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:43 ntpd[16070]: Listening on routing socket on fd #22 for interface updates
2026-03-09T16:37:44.826 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:44 ntpd[16098]: Soliciting pool server 62.113.219.231
2026-03-09T16:37:44.868 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:44 ntpd[16070]: Soliciting pool server 62.113.219.231
2026-03-09T16:37:45.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:45 ntpd[16098]: Soliciting pool server 176.9.8.206
2026-03-09T16:37:45.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:45 ntpd[16098]: Soliciting pool server 172.104.149.161
2026-03-09T16:37:45.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:45 ntpd[16070]: Soliciting pool server 176.9.8.206
2026-03-09T16:37:45.867 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:45 ntpd[16070]: Soliciting pool server 172.104.149.161
2026-03-09T16:37:46.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:46 ntpd[16098]: Soliciting pool server 144.76.66.156
2026-03-09T16:37:46.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:46 ntpd[16098]: Soliciting pool server 185.252.140.126
2026-03-09T16:37:46.825 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:46 ntpd[16098]: Soliciting pool server 195.201.20.16
2026-03-09T16:37:46.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:46 ntpd[16070]: Soliciting pool server 144.76.66.156
2026-03-09T16:37:46.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:46 ntpd[16070]: Soliciting pool server 185.252.140.126
2026-03-09T16:37:46.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:46 ntpd[16070]: Soliciting pool server 195.201.20.16
2026-03-09T16:37:47.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:47 ntpd[16098]: Soliciting pool server 94.16.122.152
2026-03-09T16:37:47.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:47 ntpd[16098]: Soliciting pool server 168.119.152.72
2026-03-09T16:37:47.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:47 ntpd[16098]: Soliciting pool server 185.252.140.125
2026-03-09T16:37:47.824 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:47 ntpd[16098]: Soliciting pool server 90.187.112.137
2026-03-09T16:37:47.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:47 ntpd[16070]: Soliciting pool server 94.16.122.152
2026-03-09T16:37:47.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:47 ntpd[16070]: Soliciting pool server 168.119.152.72
2026-03-09T16:37:47.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:47 ntpd[16070]: Soliciting pool server 185.252.140.125
2026-03-09T16:37:47.866 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:47 ntpd[16070]: Soliciting pool server 90.187.112.137
2026-03-09T16:37:48.823 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:48 ntpd[16098]: Soliciting pool server 178.215.228.24
2026-03-09T16:37:48.823 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:48 ntpd[16098]: Soliciting pool server 78.47.56.71
2026-03-09T16:37:48.823 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:48 ntpd[16098]: Soliciting pool server 134.60.1.30
2026-03-09T16:37:48.855 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:48 ntpd[16098]: Soliciting pool server 185.125.190.58
2026-03-09T16:37:48.865 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:48 ntpd[16070]: Soliciting pool server 178.215.228.24
2026-03-09T16:37:48.865 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:48 ntpd[16070]: Soliciting pool server 78.47.56.71
2026-03-09T16:37:48.865 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:48 ntpd[16070]: Soliciting pool server 185.125.190.58
2026-03-09T16:37:49.822 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:49 ntpd[16098]: Soliciting pool server 185.125.190.56
2026-03-09T16:37:49.823 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:49 ntpd[16098]: Soliciting pool server 185.232.69.65
2026-03-09T16:37:49.823 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:49 ntpd[16098]: Soliciting pool server 139.162.152.20
2026-03-09T16:37:49.864 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:49 ntpd[16070]: Soliciting pool server 185.125.190.56
2026-03-09T16:37:49.865 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:49 ntpd[16070]: Soliciting pool server 185.232.69.65
2026-03-09T16:37:49.865 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:49 ntpd[16070]: Soliciting pool server 139.162.152.20
2026-03-09T16:37:50.864 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:50 ntpd[16070]: Soliciting pool server 91.189.91.157
2026-03-09T16:37:50.864 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:50 ntpd[16070]: Soliciting pool server 148.251.5.46
2026-03-09T16:37:50.864 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:50 ntpd[16070]: Soliciting pool server 2003:a:87f:c37c::4
2026-03-09T16:37:51.842 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 16:37:51 ntpd[16098]: ntpd: time slew +0.000001 s
2026-03-09T16:37:51.842 INFO:teuthology.orchestra.run.vm06.stdout:ntpd: time slew +0.000001s
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout: remote refid st t when poll reach delay offset jitter
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout:==============================================================================
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:51.863 INFO:teuthology.orchestra.run.vm06.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:52.886 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 16:37:52 ntpd[16070]: ntpd: time slew +0.010506 s
2026-03-09T16:37:52.886 INFO:teuthology.orchestra.run.vm09.stdout:ntpd: time slew +0.010506s
2026-03-09T16:37:52.908 INFO:teuthology.orchestra.run.vm09.stdout: remote refid st t when poll reach delay offset jitter
2026-03-09T16:37:52.909 INFO:teuthology.orchestra.run.vm09.stdout:==============================================================================
2026-03-09T16:37:52.909 INFO:teuthology.orchestra.run.vm09.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:52.909 INFO:teuthology.orchestra.run.vm09.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:52.909 INFO:teuthology.orchestra.run.vm09.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:52.909 INFO:teuthology.orchestra.run.vm09.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:52.909 INFO:teuthology.orchestra.run.vm09.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:52.909 INFO:teuthology.run_tasks:Running task nvme_loop...
2026-03-09T16:37:52.912 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices...
2026-03-09T16:37:52.912 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-09T16:37:52.912 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout
2026-03-09T16:37:52.915 DEBUG:teuthology.orchestra.run:got remote process result: 1
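/scratch_devs is absent on these VMs (the dd above exits 1), so the task falls back to globbing /dev/[sv]d? and dropping the root disk, as the next lines show. A sketch of that fallback discovery (illustrative helper, mirroring the `ls /dev/[sv]d?` and "Removing root device" steps below):

    import glob

    def candidate_scratch_devs(root_dev='/dev/vda'):
        """Fallback scratch-device discovery: glob /dev/[sv]d? and drop the
        root disk, leaving only attached data volumes."""
        devs = sorted(glob.glob('/dev/[sv]d?'))
        return [d for d in devs if d != root_dev]

    # On vm06 this yields ['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'],
    # matching the devs=[...] line below.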
2026-03-09T16:37:52.916 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d?
2026-03-09T16:37:52.960 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda
2026-03-09T16:37:52.960 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb
2026-03-09T16:37:52.960 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc
2026-03-09T16:37:52.961 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd
2026-03-09T16:37:52.961 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde
2026-03-09T16:37:52.961 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-09T16:37:52.961 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-09T16:37:52.961 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb
2026-03-09T16:37:53.004 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdb
2026-03-09T16:37:53.004 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T16:37:53.004 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 24 Links: 1 Device type: fe,10
2026-03-09T16:37:53.004 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T16:37:53.005 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 16:34:34.947695650 +0000
2026-03-09T16:37:53.005 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 16:34:33.891695650 +0000
2026-03-09T16:37:53.005 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 16:34:33.891695650 +0000
2026-03-09T16:37:53.005 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T16:37:53.005 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-09T16:37:53.051 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T16:37:53.051 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T16:37:53.051 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000146465 s, 3.5 MB/s
2026-03-09T16:37:53.052 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-09T16:37:53.097 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdc
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 25 Links: 1 Device type: fe,20
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 16:34:34.955695650 +0000
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 16:34:33.883695650 +0000
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 16:34:33.883695650 +0000
2026-03-09T16:37:53.140 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T16:37:53.140 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-09T16:37:53.187 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T16:37:53.187 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T16:37:53.187 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000138891 s, 3.7 MB/s
2026-03-09T16:37:53.188 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-09T16:37:53.233 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vdd
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 26 Links: 1 Device type: fe,30
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 16:34:34.939695650 +0000
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 16:34:33.883695650 +0000
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 16:34:33.883695650 +0000
2026-03-09T16:37:53.276 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T16:37:53.276 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-09T16:37:53.323 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T16:37:53.324 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T16:37:53.324 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000170859 s, 3.0 MB/s
2026-03-09T16:37:53.324 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-09T16:37:53.369 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/vde
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 27 Links: 1 Device type: fe,40
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 16:34:34.951695650 +0000
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 16:34:33.923695650 +0000
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 16:34:33.923695650 +0000
2026-03-09T16:37:53.412 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T16:37:53.412 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-09T16:37:53.459 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T16:37:53.459 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T16:37:53.459 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000132338 s, 3.9 MB/s
2026-03-09T16:37:53.460 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-09T16:37:53.505 DEBUG:teuthology.orchestra.run.vm06:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
2026-03-09T16:37:53.553 INFO:teuthology.orchestra.run.vm06.stderr:modprobe: FATAL: Module nvme_loop not found in directory /lib/modules/5.15.0-1092-kvm
2026-03-09T16:37:53.553 DEBUG:teuthology.orchestra.run:got remote process result: 1
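This is the fatal step: the guard `grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && ...` fails because the running 5.15.0-1092-kvm kernel ships no nvme_loop module at all (the minimal -kvm flavour omits it), so modprobe exits nonzero and the whole && chain aborts. A sketch of a pre-flight check for the same condition, illustrative rather than part of the task:

    import subprocess

    def nvme_loop_available() -> bool:
        """True if nvme_loop is already loaded or can be resolved by modprobe."""
        loaded = any(line.split()[0] == 'nvme_loop'
                     for line in open('/proc/modules'))
        if loaded:
            return True
        # `modprobe -n` dry-runs module resolution without inserting anything.
        probe = subprocess.run(['modprobe', '-n', 'nvme_loop'],
                               capture_output=True)
        return probe.returncode == 0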
2026-03-09T16:37:53.553 ERROR:teuthology.run_tasks:Saw exception from tasks.
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/run_tasks.py", line 112, in run_tasks
    manager.__enter__()
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks/nvme_loop.py", line 27, in task
    remote.run(
  File "/home/teuthos/teuthology/teuthology/orchestra/remote.py", line 575, in run
    r = self._runner(client=self.ssh, name=self.shortname, **kwargs)
  File "/home/teuthos/teuthology/teuthology/orchestra/run.py", line 461, in run
    r.wait()
  File "/home/teuthos/teuthology/teuthology/orchestra/run.py", line 161, in wait
    self._raise_for_status()
  File "/home/teuthos/teuthology/teuthology/orchestra/run.py", line 181, in _raise_for_status
    raise CommandFailedError(
teuthology.exceptions.CommandFailedError: Command failed on vm06 with status 1: "grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype"
2026-03-09T16:37:53.554 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop
2026-03-09T16:37:53.556 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-09T16:37:53.557 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-09T16:37:53.557 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T16:37:53.594 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout: remote refid st t when poll reach delay offset jitter
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout:==============================================================================
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.598 INFO:teuthology.orchestra.run.vm06.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout: remote refid st t when poll reach delay offset jitter
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout:==============================================================================
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.600 INFO:teuthology.orchestra.run.vm09.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-09T16:37:53.600 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-09T16:37:53.602 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-09T16:37:53.602 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-09T16:37:53.604 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-09T16:37:53.606 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-09T16:37:53.608 INFO:teuthology.task.internal:Duration was 137.010259 seconds
2026-03-09T16:37:53.608 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-09T16:37:53.610 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-09T16:37:53.610 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T16:37:53.643 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T16:37:53.666 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-09T16:37:53.666 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-09T16:37:53.666 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T16:37:53.716 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-09T16:37:53.716 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
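The kern.log check above is one long grep pipeline: match BUG/INFO/DEADLOCK, strip a list of known-benign patterns, and report only the first surviving line. The same filter expressed compactly in Python, with the pattern lists copied (and partly elided) from the command above; the function itself is illustrative:

    import re

    MATCH = re.compile(r'\bBUG\b|\bINFO\b|\bDEADLOCK\b')
    IGNORE = [
        re.compile(p) for p in (
            r'task .* blocked for more than .* seconds',
            r'lockdep is turned off',
            r'CRON',
            r'INFO: NMI handler \(perf_event_nmi_handler\) took too long to run',
            # ... remaining exclusions omitted here; see the grep chain above ...
        )
    ]

    def first_kernel_error(lines):
        """Return the first suspicious kern.log line, or None (like `head -n 1`)."""
        for line in lines:
            if MATCH.search(line) and not any(p.search(line) for p in IGNORE):
                return line
        return None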
2026-03-09T16:37:53.728 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-03-09T16:37:53.728 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:37:53.759 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:37:53.791 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-09T16:37:53.791 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T16:37:53.819 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T16:37:53.824 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:37:53.824 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:37:53.824 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T16:37:53.824 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:37:53.825 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz/home/ubuntu/cephtest/archive/syslog/journalctl.log:
2026-03-09T16:37:53.827 INFO:teuthology.orchestra.run.vm06.stderr: 83.6% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T16:37:53.839 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T16:37:53.839 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T16:37:53.839 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T16:37:53.839 INFO:teuthology.orchestra.run.vm09.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T16:37:53.840 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T16:37:53.843 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 83.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T16:37:53.844 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-09T16:37:53.846 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-09T16:37:53.846 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T16:37:53.880 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T16:37:53.896 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-09T16:37:53.899 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:37:53.926 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:37:53.931 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-09T16:37:53.947 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-09T16:37:53.955 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:37:53.984 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:53.984 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T16:37:53.997 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T16:37:53.997 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-09T16:37:54.000 INFO:teuthology.task.internal:Transferring archived files...
2026-03-09T16:37:54.000 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/554/remote/vm06
2026-03-09T16:37:54.001 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T16:37:54.033 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/554/remote/vm09
2026-03-09T16:37:54.033 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T16:37:54.046 INFO:teuthology.task.internal:Removing archive directory...
2026-03-09T16:37:54.046 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T16:37:54.075 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T16:37:54.090 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-09T16:37:54.092 INFO:teuthology.task.internal:Not uploading archives.
2026-03-09T16:37:54.092 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
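The coredump unwind above resets kernel.core_pattern, deletes any collected cores that `file` identifies as coming from systemd-sysusers, and removes the directory only if it ends up empty (here it does, as the `test -e ... result: 1` lines confirm). A Python sketch of the same cleanup chain (illustrative, not teuthology's code):

    import os
    import subprocess

    def prune_coredumps(d='/home/ubuntu/cephtest/archive/coredump'):
        """Drop systemd-sysusers cores, then remove d if it is empty,
        mirroring the find/file/rm/rmdir chain above."""
        for root, _, files in os.walk(d):
            for name in files:
                path = os.path.join(root, name)
                out = subprocess.run(['file', path], capture_output=True,
                                     text=True).stdout
                if 'systemd-sysusers' in out:
                    os.remove(path)
        try:
            os.rmdir(d)  # like `rmdir --ignore-fail-on-non-empty`
        except OSError:
            pass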
2026-03-09T16:37:54.095 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-09T16:37:54.095 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T16:37:54.119 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T16:37:54.121 INFO:teuthology.orchestra.run.vm06.stdout: 258069 4 drwxr-xr-x 2 ubuntu ubuntu 4096 Mar 9 16:37 /home/ubuntu/cephtest
2026-03-09T16:37:54.134 INFO:teuthology.orchestra.run.vm09.stdout: 258076 4 drwxr-xr-x 2 ubuntu ubuntu 4096 Mar 9 16:37 /home/ubuntu/cephtest
2026-03-09T16:37:54.134 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-09T16:37:54.140 INFO:teuthology.run:Summary data:
description: orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
duration: 137.01025915145874
failure_reason: 'Command failed on vm06 with status 1: "grep ''^nvme_loop'' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype"'
owner: kyr
sentry_event: null
status: fail
success: false
2026-03-09T16:37:54.141 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T16:37:54.159 INFO:teuthology.run:FAIL