2026-03-09T17:12:36.492 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T17:12:36.498 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T17:12:36.520 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/582
branch: squid
description: orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '582'
ktype: distro
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: ubuntu
os_version: '22.04'
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK9K1spDQopbtrvhAiFIOZjChsZPkn3DyTEFj7eyA/dZh6cxyT3pAJ32/2E1LbOVWkD3BAkf+yBDeddW78pwZ00=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAqjpBw/8xf0YFOfX0pZtrUm53/7uyhSyWA78QqM2C5LVPhBexJYwbk3UArcr+6TAARcTf8B735gpt2jsVuS5PE=
tasks:
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph orch apply rgw foorgw --port 8800
    - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
- vip.exec:
    host.a:
    - dnf install -y python3-boto3 || apt install -y python3-boto3
    - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
- python:
    host.a: |
      import boto3
      import json

      with open('/tmp/user.json', 'rt') as f:
          info = json.loads(f.read())
      s3 = boto3.resource(
          's3',
          aws_access_key_id=info['keys'][0]['access_key'],
          aws_secret_access_key=info['keys'][0]['secret_key'],
          endpoint_url='http://localhost:8800',
      )
      bucket = s3.Bucket('foobucket')
      bucket.create()
      bucket.put_object(Key='myobject', Body='thebody')
- cephadm.shell:
    host.a:
    - ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser
- cephadm.wait_for_service:
    service: nfs.foo
- cephadm.wait_for_service:
    service: ingress.nfs.foo
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - sleep 5
    - mount -t nfs {{VIP0}}:/foouser /mnt/foo
    - test -d /mnt/foo/foobucket
    - find /mnt/foo -ls
    - grep thebody /mnt/foo/foobucket/myobject
    - echo test > /mnt/foo/foobucket/newobject
    - sync
- python:
    host.a: |
      import boto3
      import json
      from io import BytesIO

      with open('/tmp/user.json', 'rt') as f:
          info = json.loads(f.read())
      s3 = boto3.resource(
          's3',
          aws_access_key_id=info['keys'][0]['access_key'],
          aws_secret_access_key=info['keys'][0]['secret_key'],
          endpoint_url='http://localhost:8800',
      )
      bucket = s3.Bucket('foobucket')
      data = BytesIO()
      bucket.download_fileobj(Fileobj=data, Key='newobject')
      print(data.getvalue())
      assert data.getvalue().decode() == 'test\n'
- vip.exec:
    host.a:
    - umount /mnt/foo
- cephadm.shell:
    host.a:
    - ceph nfs export rm foo /foouser
    - ceph nfs cluster rm foo
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-09T17:12:36.520 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T17:12:36.521 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T17:12:36.521 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T17:12:36.521 INFO:teuthology.task.internal:Checking packages...
2026-03-09T17:12:36.521 INFO:teuthology.task.internal:Checking packages for os_type 'ubuntu', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T17:12:36.521 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T17:12:36.521 INFO:teuthology.packaging:ref: None
2026-03-09T17:12:36.521 INFO:teuthology.packaging:tag: None
2026-03-09T17:12:36.521 INFO:teuthology.packaging:branch: squid
2026-03-09T17:12:36.521 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T17:12:36.521 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&ref=squid
2026-03-09T17:12:37.179 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678-ge911bdeb-1jammy
2026-03-09T17:12:37.180 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T17:12:37.181 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T17:12:37.181 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T17:12:37.181 INFO:teuthology.task.internal:Saving configuration
2026-03-09T17:12:37.187 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T17:12:37.187 INFO:teuthology.task.internal.check_lock:Checking locks...
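The check_packages step above resolves the requested branch against the shaman build service before any node is touched; the query URL is logged verbatim. A minimal sketch reproducing that lookup, assuming only the `requests` library (the response field names used here are assumptions based on shaman's /api/search output, not something this log confirms):

    # Reproduce the shaman.ceph.com package lookup logged above.
    # Assumed: `requests` is installed; result records carry 'sha1'/'status'/'url'.
    import requests

    params = {
        'status': 'ready',
        'project': 'ceph',
        'flavor': 'default',
        'distros': 'ubuntu/22.04/x86_64',  # requests URL-encodes this to ubuntu%2F22.04%2Fx86_64
        'ref': 'squid',
    }
    resp = requests.get('https://shaman.ceph.com/api/search', params=params, timeout=30)
    resp.raise_for_status()
    for build in resp.json():
        print(build.get('sha1'), build.get('status'), build.get('url'))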
2026-03-09T17:12:37.194 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/582', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 17:11:21.476346', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK9K1spDQopbtrvhAiFIOZjChsZPkn3DyTEFj7eyA/dZh6cxyT3pAJ32/2E1LbOVWkD3BAkf+yBDeddW78pwZ00='}
2026-03-09T17:12:37.200 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/582', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 17:11:21.475883', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAqjpBw/8xf0YFOfX0pZtrUm53/7uyhSyWA78QqM2C5LVPhBexJYwbk3UArcr+6TAARcTf8B735gpt2jsVuS5PE='}
2026-03-09T17:12:37.200 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-09T17:12:37.201 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.a', 'client.0']
2026-03-09T17:12:37.201 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'client.1']
2026-03-09T17:12:37.201 INFO:teuthology.run_tasks:Running task console_log...
2026-03-09T17:12:37.208 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-03-09T17:12:37.214 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding
2026-03-09T17:12:37.214 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fa321ecbd00>, signals=[15])
2026-03-09T17:12:37.214 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-09T17:12:37.215 INFO:teuthology.task.internal:Opening connections...
2026-03-09T17:12:37.215 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-03-09T17:12:37.216 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T17:12:37.275 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local
2026-03-09T17:12:37.276 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T17:12:37.336 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-09T17:12:37.337 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-03-09T17:12:37.345 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-03-09T17:12:37.345 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:NAME="Ubuntu"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="22.04"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_CODENAME=jammy
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:ID=ubuntu
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE=debian
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://www.ubuntu.com/"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-03-09T17:12:37.390 INFO:teuthology.orchestra.run.vm06.stdout:UBUNTU_CODENAME=jammy
2026-03-09T17:12:37.390 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-03-09T17:12:37.395 DEBUG:teuthology.orchestra.run.vm09:> uname -m
2026-03-09T17:12:37.405 INFO:teuthology.orchestra.run.vm09.stdout:x86_64
2026-03-09T17:12:37.405 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:NAME="Ubuntu"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="22.04"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_CODENAME=jammy
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:ID=ubuntu
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE=debian
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://www.ubuntu.com/"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-03-09T17:12:37.449 INFO:teuthology.orchestra.run.vm09.stdout:UBUNTU_CODENAME=jammy
2026-03-09T17:12:37.449 INFO:teuthology.lock.ops:Updating vm09.local on lock server
2026-03-09T17:12:37.454 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-09T17:12:37.456 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-09T17:12:37.457 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-09T17:12:37.457 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-03-09T17:12:37.458 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest
2026-03-09T17:12:37.492 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-09T17:12:37.493 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-09T17:12:37.493 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-03-09T17:12:37.504 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph)
2026-03-09T17:12:37.506 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T17:12:37.536 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-09T17:12:37.537 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-09T17:12:37.544 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-03-09T17:12:37.549 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:12:37.871 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready
2026-03-09T17:12:37.874 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:12:38.114 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-09T17:12:38.115 INFO:teuthology.task.internal:Creating test directory...
2026-03-09T17:12:38.115 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T17:12:38.116 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-09T17:12:38.120 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-09T17:12:38.121 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-09T17:12:38.122 INFO:teuthology.task.internal:Creating archive directory...
2026-03-09T17:12:38.122 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T17:12:38.160 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-09T17:12:38.165 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-09T17:12:38.166 INFO:teuthology.task.internal:Enabling coredump saving...
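The stderr lines from `ls` above are expected and harmless: with the directory absent, the unquoted command substitution in `test -z $(ls -A /var/lib/ceph)` expands to an empty string, so the test passes and the run proceeds. A sketch of the same check in Python (a hypothetical helper for illustration, not teuthology code):

    # Equivalent of `test -z $(ls -A /var/lib/ceph)`: a missing directory
    # passes the "empty" check just like an empty one, as seen in the log above.
    import os

    def ceph_data_is_clean(path='/var/lib/ceph'):
        try:
            return len(os.listdir(path)) == 0   # empty directory -> clean
        except FileNotFoundError:
            return True                         # missing directory -> also clean

    print(ceph_data_is_clean())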
2026-03-09T17:12:38.167 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T17:12:38.205 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:12:38.205 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T17:12:38.208 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:12:38.208 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T17:12:38.248 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T17:12:38.256 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T17:12:38.257 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T17:12:38.261 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T17:12:38.262 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T17:12:38.262 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-09T17:12:38.264 INFO:teuthology.task.internal:Configuring sudo...
2026-03-09T17:12:38.264 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T17:12:38.304 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T17:12:38.311 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-09T17:12:38.314 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
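In the core_pattern set above, %t expands to the dump time in seconds since the epoch and %p to the PID of the crashing process (per core(5)), so archived cores sort chronologically by name. A small sketch for decoding such filenames when triaging an archive (a hypothetical helper, not part of teuthology):

    # Decode coredump names produced by the %t.%p.core pattern set above.
    # %t = seconds since epoch, %p = PID of the dumping process (see core(5)).
    import datetime
    import re

    def parse_core_name(name):
        m = re.fullmatch(r'(\d+)\.(\d+)\.core', name)
        if not m:
            return None
        ts, pid = int(m.group(1)), int(m.group(2))
        return datetime.datetime.fromtimestamp(ts, datetime.timezone.utc), pid

    # e.g. parse_core_name('1772040000.12345.core') -> (UTC timestamp, 12345)
    print(parse_core_name('1772040000.12345.core'))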
2026-03-09T17:12:38.314 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T17:12:38.356 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T17:12:38.359 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T17:12:38.402 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T17:12:38.445 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-09T17:12:38.446 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T17:12:38.496 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T17:12:38.499 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T17:12:38.544 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-09T17:12:38.545 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T17:12:38.592 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-03-09T17:12:38.593 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart
2026-03-09T17:12:38.650 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-09T17:12:38.652 INFO:teuthology.task.internal:Starting timer...
2026-03-09T17:12:38.652 INFO:teuthology.run_tasks:Running task pcp...
2026-03-09T17:12:38.655 INFO:teuthology.run_tasks:Running task selinux...
2026-03-09T17:12:38.657 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-03-09T17:12:38.657 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported
2026-03-09T17:12:38.657 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-09T17:12:38.657 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-09T17:12:38.657 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-09T17:12:38.657 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-09T17:12:38.659 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-09T17:12:38.659 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-09T17:12:38.660 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-09T17:12:39.247 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-09T17:12:39.253 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-09T17:12:39.253 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryvh9itbvz --limit vm06.local,vm09.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-09T17:14:43.145 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm06.local'), Remote(name='ubuntu@vm09.local')]
2026-03-09T17:14:43.146 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-03-09T17:14:43.146 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T17:14:43.207 DEBUG:teuthology.orchestra.run.vm06:> true
2026-03-09T17:14:43.420 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-03-09T17:14:43.421 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local'
2026-03-09T17:14:43.421 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-09T17:14:43.482 DEBUG:teuthology.orchestra.run.vm09:> true
2026-03-09T17:14:43.701 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local'
2026-03-09T17:14:43.701 INFO:teuthology.run_tasks:Running task clock...
2026-03-09T17:14:43.704 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
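The full ansible-playbook command line is logged above and can be rerun by hand when debugging provisioning. A minimal wrapper sketch, assuming the same ceph-cm-ansible checkout; the temporary inventory file from the log no longer exists, so `inventory.ini` here is a stand-in you would create yourself:

    # Rerun the cephlab playbook as teuthology invoked it above.
    # `inventory.ini` is a hypothetical stand-in for the temp inventory file.
    import json
    import subprocess

    extra_vars = {'ansible_ssh_user': 'ubuntu', 'timezone': 'UTC'}
    cmd = [
        'ansible-playbook', '-v',
        '--extra-vars', json.dumps(extra_vars),
        '-i', 'inventory.ini',
        '--limit', 'vm06.local,vm09.local',
        '/home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml',
        '--skip-tags', 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,'
                       'kerberos,ntp-client,resolvconf,cpan,nfs',
    ]
    subprocess.run(cmd, check=True)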
2026-03-09T17:14:43.704 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T17:14:43.704 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T17:14:43.705 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-09T17:14:43.705 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T17:14:43.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Command line: ntpd -gq
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: ----------------------------------------------------
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: ntp-4 is maintained by Network Time Foundation,
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: corporation.  Support and training for ntp-4 are
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: available at https://www.nwtime.org/support
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: ----------------------------------------------------
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: proto: precision = 0.030 usec (-25)
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: basedate set to 2022-02-04
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: gps base set to 2022-02-06 (week 2196)
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-03-09T17:14:43.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-03-09T17:14:43.724 INFO:teuthology.orchestra.run.vm06.stderr: 9 Mar 17:14:43 ntpd[16105]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 72 days ago
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listen and drop on 0 v6wildcard [::]:123
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listen normally on 2 lo 127.0.0.1:123
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listen normally on 3 ens3 192.168.123.106:123
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listen normally on 4 lo [::1]:123
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:6%2]:123
2026-03-09T17:14:43.725 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:43 ntpd[16105]: Listening on routing socket on fd #22 for interface updates
2026-03-09T17:14:43.758 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Command line: ntpd -gq
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: ----------------------------------------------------
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: ntp-4 is maintained by Network Time Foundation,
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: corporation.  Support and training for ntp-4 are
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: available at https://www.nwtime.org/support
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: ----------------------------------------------------
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: proto: precision = 0.029 usec (-25)
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: basedate set to 2022-02-04
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: gps base set to 2022-02-06 (week 2196)
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-03-09T17:14:43.759 INFO:teuthology.orchestra.run.vm09.stderr: 9 Mar 17:14:43 ntpd[16117]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 72 days ago
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listen and drop on 0 v6wildcard [::]:123
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listen normally on 2 lo 127.0.0.1:123
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listen normally on 3 ens3 192.168.123.109:123
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listen normally on 4 lo [::1]:123
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:9%2]:123
2026-03-09T17:14:43.760 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:43 ntpd[16117]: Listening on routing socket on fd #22 for interface updates
2026-03-09T17:14:44.724 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:44 ntpd[16105]: Soliciting pool server 217.144.138.234
2026-03-09T17:14:44.759 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:44 ntpd[16117]: Soliciting pool server 176.9.44.212
2026-03-09T17:14:45.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:45 ntpd[16105]: Soliciting pool server 93.241.86.156
2026-03-09T17:14:45.723 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:45 ntpd[16105]: Soliciting pool server 37.114.42.119
2026-03-09T17:14:45.758 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:45 ntpd[16117]: Soliciting pool server 217.144.138.234
2026-03-09T17:14:45.758 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:45 ntpd[16117]: Soliciting pool server 162.19.170.154
2026-03-09T17:14:46.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:46 ntpd[16105]: Soliciting pool server 212.132.97.26
2026-03-09T17:14:46.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:46 ntpd[16105]: Soliciting pool server 185.252.140.125
2026-03-09T17:14:46.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:46 ntpd[16105]: Soliciting pool server 5.75.181.179
2026-03-09T17:14:46.758 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:46 ntpd[16117]: Soliciting pool server 37.114.42.119
2026-03-09T17:14:46.758 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:46 ntpd[16117]: Soliciting pool server 93.241.86.156
2026-03-09T17:14:46.758 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:46 ntpd[16117]: Soliciting pool server 212.132.108.186
2026-03-09T17:14:47.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:47 ntpd[16105]: Soliciting pool server 78.47.168.188
2026-03-09T17:14:47.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:47 ntpd[16105]: Soliciting pool server 141.84.43.75
2026-03-09T17:14:47.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:47 ntpd[16105]: Soliciting pool server 176.9.44.212
2026-03-09T17:14:47.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:47 ntpd[16105]: Soliciting pool server 144.76.76.107
2026-03-09T17:14:47.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:47 ntpd[16117]: Soliciting pool server 5.75.181.179
2026-03-09T17:14:47.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:47 ntpd[16117]: Soliciting pool server 212.132.97.26
2026-03-09T17:14:47.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:47 ntpd[16117]: Soliciting pool server 185.252.140.125
2026-03-09T17:14:47.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:47 ntpd[16117]: Soliciting pool server 217.160.19.219
2026-03-09T17:14:48.721 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:48 ntpd[16105]: Soliciting pool server 85.220.190.246
2026-03-09T17:14:48.721 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:48 ntpd[16105]: Soliciting pool server 172.104.154.182
2026-03-09T17:14:48.721 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:48 ntpd[16105]: Soliciting pool server 162.19.170.154
2026-03-09T17:14:48.721 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:48 ntpd[16105]: Soliciting pool server 185.125.190.56
2026-03-09T17:14:48.722 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:48 ntpd[16105]: Soliciting pool server 162.159.200.123
2026-03-09T17:14:48.756 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:48 ntpd[16117]: Soliciting pool server 144.76.76.107
2026-03-09T17:14:48.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:48 ntpd[16117]: Soliciting pool server 78.47.168.188
2026-03-09T17:14:48.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:48 ntpd[16117]: Soliciting pool server 141.84.43.75
2026-03-09T17:14:48.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:48 ntpd[16117]: Soliciting pool server 185.125.190.58
2026-03-09T17:14:48.757 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:48 ntpd[16117]: Soliciting pool server 162.159.200.123
2026-03-09T17:14:49.720 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:49 ntpd[16105]: Soliciting pool server 91.189.91.157
2026-03-09T17:14:49.720 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:49 ntpd[16105]: Soliciting pool server 144.76.59.106
2026-03-09T17:14:49.720 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:49 ntpd[16105]: Soliciting pool server 212.132.108.186
2026-03-09T17:14:49.720 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:49 ntpd[16105]: Soliciting pool server 78.47.56.71
2026-03-09T17:14:49.756 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:49 ntpd[16117]: Soliciting pool server 185.125.190.56
2026-03-09T17:14:49.756 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:49 ntpd[16117]: Soliciting pool server 85.220.190.246
2026-03-09T17:14:49.756 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:49 ntpd[16117]: Soliciting pool server 172.104.154.182
2026-03-09T17:14:49.756 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:49 ntpd[16117]: Soliciting pool server 172.104.154.182
2026-03-09T17:14:52.777 INFO:teuthology.orchestra.run.vm09.stdout: 9 Mar 17:14:52 ntpd[16117]: ntpd: time slew +0.001364 s
2026-03-09T17:14:52.777 INFO:teuthology.orchestra.run.vm09.stdout:ntpd: time slew +0.001364s
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout:==============================================================================
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:52.798 INFO:teuthology.orchestra.run.vm09.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:53.749 INFO:teuthology.orchestra.run.vm06.stdout: 9 Mar 17:14:53 ntpd[16105]: ntpd: time slew +0.011469 s
2026-03-09T17:14:53.749 INFO:teuthology.orchestra.run.vm06.stdout:ntpd: time slew +0.011469s
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout:==============================================================================
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:53.771 INFO:teuthology.orchestra.run.vm06.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:53.771 INFO:teuthology.run_tasks:Running task nvme_loop...
2026-03-09T17:14:53.774 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices...
2026-03-09T17:14:53.774 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-03-09T17:14:53.774 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout
2026-03-09T17:14:53.818 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:14:53.818 DEBUG:teuthology.orchestra.run.vm06:> ls /dev/[sv]d?
2026-03-09T17:14:53.862 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vda
2026-03-09T17:14:53.863 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdb
2026-03-09T17:14:53.863 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdc
2026-03-09T17:14:53.863 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vdd
2026-03-09T17:14:53.863 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vde
2026-03-09T17:14:53.863 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-09T17:14:53.863 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-09T17:14:53.863 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdb
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vdb
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 24 Links: 1 Device type: fe,10
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:11:56.185348615 +0000
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:11:55.097348615 +0000
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:11:55.097348615 +0000
2026-03-09T17:14:53.907 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T17:14:53.907 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-09T17:14:53.955 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T17:14:53.955 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T17:14:53.955 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000165562 s, 3.1 MB/s
2026-03-09T17:14:53.956 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-09T17:14:54.004 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdc
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vdc
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 25 Links: 1 Device type: fe,20
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:11:56.193348615 +0000
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:11:55.089348615 +0000
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:11:55.089348615 +0000
2026-03-09T17:14:54.050 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T17:14:54.050 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-09T17:14:54.097 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T17:14:54.097 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T17:14:54.097 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000126651 s, 4.0 MB/s
2026-03-09T17:14:54.098 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-09T17:14:54.143 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vdd
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vdd
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 26 Links: 1 Device type: fe,30
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:11:56.185348615 +0000
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:11:55.089348615 +0000
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:11:55.089348615 +0000
2026-03-09T17:14:54.190 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T17:14:54.190 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-09T17:14:54.237 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T17:14:54.237 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T17:14:54.237 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000135181 s, 3.8 MB/s
2026-03-09T17:14:54.238 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-09T17:14:54.283 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vde
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vde
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 27 Links: 1 Device type: fe,40
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-03-09 17:11:56.193348615 +0000
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-03-09 17:11:55.093348615 +0000
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-03-09 17:11:55.093348615 +0000
2026-03-09T17:14:54.330 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-03-09T17:14:54.330 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-09T17:14:54.377 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-03-09T17:14:54.377 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-03-09T17:14:54.377 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.00014747 s, 3.5 MB/s
2026-03-09T17:14:54.378 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-09T17:14:54.423 DEBUG:teuthology.orchestra.run.vm06:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
2026-03-09T17:14:54.476 INFO:teuthology.orchestra.run.vm06.stderr:modprobe: FATAL: Module nvme_loop not found in directory /lib/modules/5.15.0-1092-kvm
2026-03-09T17:14:54.477 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:14:54.477 ERROR:teuthology.run_tasks:Saw exception from tasks.
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/run_tasks.py", line 112, in run_tasks
    manager.__enter__()
  File "/home/teuthos/.local/share/uv/python/cpython-3.10.19-linux-x86_64-gnu/lib/python3.10/contextlib.py", line 135, in __enter__
    return next(self.gen)
  File "/home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks/nvme_loop.py", line 27, in task
    remote.run(
  File "/home/teuthos/teuthology/teuthology/orchestra/remote.py", line 575, in run
    r = self._runner(client=self.ssh, name=self.shortname, **kwargs)
  File "/home/teuthos/teuthology/teuthology/orchestra/run.py", line 461, in run
    r.wait()
  File "/home/teuthos/teuthology/teuthology/orchestra/run.py", line 161, in wait
    self._raise_for_status()
  File "/home/teuthos/teuthology/teuthology/orchestra/run.py", line 181, in _raise_for_status
    raise CommandFailedError(
teuthology.exceptions.CommandFailedError: Command failed on vm06 with status 1: "grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype"
2026-03-09T17:14:54.478 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop
2026-03-09T17:14:54.479 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-09T17:14:54.481 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-09T17:14:54.481 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T17:14:54.520 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout:==============================================================================
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.524 INFO:teuthology.orchestra.run.vm06.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout:==============================================================================
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.525 INFO:teuthology.orchestra.run.vm09.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-09T17:14:54.526 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-09T17:14:54.529 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-09T17:14:54.529 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-09T17:14:54.531 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-09T17:14:54.534 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-09T17:14:54.537 INFO:teuthology.task.internal:Duration was 135.885044 seconds
2026-03-09T17:14:54.537 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-09T17:14:54.539 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-09T17:14:54.539 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T17:14:54.568 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T17:14:54.590 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-09T17:14:54.590 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local
2026-03-09T17:14:54.590 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T17:14:54.642 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-09T17:14:54.643 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T17:14:54.654 INFO:teuthology.task.internal.syslog:Gathering journactl...
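The grep pipeline above boils down to: report the first kern.log line matching BUG, INFO, or DEADLOCK that is not on a known-benign ignore list. The same logic in Python, as a sketch with the ignore list abbreviated to a few of the patterns seen above:

    # Sketch of the kern.log scan performed by the grep pipeline above:
    # flag the first BUG/INFO/DEADLOCK line not matching a benign pattern.
    # The ignore list is abbreviated here for illustration.
    import re

    FLAG = re.compile(r'\bBUG\b|\bINFO\b|\bDEADLOCK\b')
    IGNORE = [
        re.compile(r'task .* blocked for more than .* seconds'),
        re.compile(r'lockdep is turned off'),
        re.compile(r'CRON'),
        re.compile(r'INFO: NMI handler \(perf_event_nmi_handler\) took too long to run'),
    ]

    def first_kernel_error(path='kern.log'):
        with open(path, errors='replace') as f:
            for line in f:
                if FLAG.search(line) and not any(p.search(line) for p in IGNORE):
                    return line.rstrip()
        return None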
2026-03-09T17:14:54.654 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:14:54.684 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:14:54.714 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-09T17:14:54.714 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T17:14:54.744 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T17:14:54.749 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T17:14:54.749 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T17:14:54.750 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:14:54.750 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T17:14:54.750 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T17:14:54.753 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 83.6% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T17:14:54.766 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T17:14:54.766 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T17:14:54.766 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: gzip 0.0% -5 -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz --verbose
2026-03-09T17:14:54.766 INFO:teuthology.orchestra.run.vm09.stderr: -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T17:14:54.766 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T17:14:54.769 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 83.6% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T17:14:54.770 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-09T17:14:54.772 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-09T17:14:54.772 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T17:14:54.802 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T17:14:54.819 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-09T17:14:54.822 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:14:54.843 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:14:54.850 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-03-09T17:14:54.869 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-09T17:14:54.877 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:14:54.901 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:14:54.901 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T17:14:54.920 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T17:14:54.920 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-09T17:14:54.923 INFO:teuthology.task.internal:Transferring archived files...
2026-03-09T17:14:54.923 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/582/remote/vm06
2026-03-09T17:14:54.923 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T17:14:54.951 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/582/remote/vm09
2026-03-09T17:14:54.951 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T17:14:54.971 INFO:teuthology.task.internal:Removing archive directory...
2026-03-09T17:14:54.971 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T17:14:54.996 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T17:14:55.017 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-09T17:14:55.020 INFO:teuthology.task.internal:Not uploading archives.
2026-03-09T17:14:55.020 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-09T17:14:55.022 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-09T17:14:55.022 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T17:14:55.040 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T17:14:55.042 INFO:teuthology.orchestra.run.vm06.stdout: 258077 4 drwxr-xr-x 2 ubuntu ubuntu 4096 Mar 9 17:14 /home/ubuntu/cephtest
2026-03-09T17:14:55.061 INFO:teuthology.orchestra.run.vm09.stdout: 258077 4 drwxr-xr-x 2 ubuntu ubuntu 4096 Mar 9 17:14 /home/ubuntu/cephtest
2026-03-09T17:14:55.062 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-09T17:14:55.069 INFO:teuthology.run:Summary data:
description: orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final}
duration: 135.8850438594818
failure_reason: 'Command failed on vm06 with status 1: "grep ''^nvme_loop'' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype"'
owner: kyr
sentry_event: null
status: fail
success: false
2026-03-09T17:14:55.069 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T17:14:55.092 INFO:teuthology.run:FAIL
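The run fails in the nvme_loop task because the Ubuntu -kvm kernel flavor on these VPS images (5.15.0-1092-kvm in the modprobe error above) does not ship the nvme_loop module, so the grep-||-modprobe chain exits 1 before any nvmet configfs setup runs. A preflight sketch that would surface this before the task starts; `modinfo` exiting nonzero is how the missing module shows up, and the remedy suggested in the error message is an assumption (use a kernel flavor that ships the module):

    # Preflight for the nvme_loop setup that failed above: verify the module
    # exists for the running kernel before touching /sys/kernel/config/nvmet.
    import subprocess

    def nvme_loop_available():
        # modinfo returns nonzero when the module is absent for this kernel,
        # matching the "Module nvme_loop not found" FATAL seen in the log.
        return subprocess.run(
            ['modinfo', 'nvme_loop'],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        ).returncode == 0

    if not nvme_loop_available():
        raise SystemExit('nvme_loop not available for this kernel; '
                         'use a kernel flavor that ships the nvmet modules '
                         '(the -kvm flavor here does not)')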