2026-03-10T18:19:12.215 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-10T18:19:12.223 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T18:19:12.243 INFO:teuthology.run:Config:
archive_path: /archive/irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps/1107
branch: cobaltcore-storage-v19.2.3-fasttrack-8
description: rgw:verify/{0-install accounts$/{none} clusters/fixed-2 datacache/no_datacache frontend/beast ignore-pg-availability inline-data$/{on} msgr-failures/few objectstore/bluestore-bitmap overrides proto/https rgw_pool_type/replicated s3tests-branch sharding$/{default} striping$/{stripe-greater-than-chunk} supported-random-distro$/{rocky_latest} tasks/{bucket-check cls mp_reupload rados-pool-quota ragweed reshard s3tests versioning zzz-s3tests-java} validater/lockdep}
email: null
first_in_suite: false
flavor: default
job_id: '1107'
last_in_suite: false
machine_type: vps
name: irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: rocky
os_version: '9.7'
overrides:
  admin_socket:
    branch: cobaltcore-storage-v19.2.3-fasttrack-8
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      client:
        debug rgw: 20
        debug rgw notification: 20
        rgw bucket counters cache: true
        rgw crypt require ssl: false
        rgw crypt s3 kms backend: testing
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
        rgw max chunk size: 4194304
        rgw obj stripe size: 6291456
        rgw s3 auth use sts: true
        rgw sts key: abcdefghijklmnop
        rgw torrent flag: true
        rgw user counters cache: true
        setgroup: ceph
        setuser: ceph
      global:
        mon client directed command retry: 5
        ms inject socket failures: 5000
        osd_max_pg_log_entries: 10
        osd_min_pg_log_entries: 10
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
        lockdep: true
      osd:
        bdev async discard: true
        bdev enable discard: true
        bluestore allocator: bitmap
        bluestore block size: 96636764160
        bluestore fsck on mount: true
        debug bluefs: 1/20
        debug bluestore: 1/20
        debug ms: 1
        debug osd: 20
        debug rocksdb: 4/10
        lockdep: true
        mon osd backfillfull_ratio: 0.85
        mon osd full ratio: 0.9
        mon osd nearfull ratio: 0.8
        osd failsafe full ratio: 0.95
        osd mclock iops capacity threshold hdd: 49000
        osd objectstore: bluestore
        osd shutdown pgref assert: true
    flavor: default
    fs: xfs
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(PG_AVAILABILITY\)
    - \(PG_DEGRADED\)
    - \(POOL_APP_NOT_ENABLED\)
    - not have an application enabled
    - \(OSD_SLOW_PING_TIME
    - reached quota
    - POOL_FULL
    - pool\(s\) full
    sha1: 93f09337d0c8b83522903facf60962a7cb352653
  ceph-deploy:
    bluestore: true
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
      osd:
        bdev async discard: true
        bdev enable discard: true
        bluestore block size: 96636764160
        bluestore fsck on mount: true
        debug bluefs: 1/20
        debug bluestore: 1/20
        debug rocksdb: 4/10
        mon osd backfillfull_ratio: 0.85
        mon osd full ratio: 0.9
        mon osd nearfull ratio: 0.8
        osd failsafe full ratio: 0.95
        osd objectstore: bluestore
    fs: xfs
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-8
  install:
    ceph:
      flavor: default
      sha1: 93f09337d0c8b83522903facf60962a7cb352653
    extra_system_packages:
      deb:
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-19.2.3-41-g93f09337d0c/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-19.2.3-41-g93f09337d0c/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-19.2.3-41-g93f09337d0c/el9.clyso/x86_64
  openssl_keys:
    rgw.client.0:
      ca: root
      client: client.0
      embed-key: true
    root:
      client: client.0
      cn: teuthology
      install:
      - client.0
      key-type: rsa:4096
  rgw:
    client.0:
      ssl certificate: rgw.client.0
    compression type: random
    ec-data-pool: false
    frontend: beast
    storage classes: LUKEWARM, FROZEN
  s3tests:
    accounts:
      iam alt root: RGW99999999999999999
      iam root: RGW88888888888888888
    force-branch: ceph-squid
    storage classes: LUKEWARM, FROZEN
  selinux:
    allowlist:
    - scontext=system_u:system_r:getty_t:s0
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: 0.5
  workunit:
    branch: tt-19.2.3-fasttrack-8
    sha1: 50e98e8318117ef866947b5847d947538c5efcdc
owner: irq0
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - node-exporter.b
seed: 8214
sha1: 93f09337d0c8b83522903facf60962a7cb352653
sleep_before_teardown: 0
suite: rgw:verify
suite_branch: tt-19.2.3-fasttrack-8
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_50e98e8318117ef866947b5847d947538c5efcdc/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 50e98e8318117ef866947b5847d947538c5efcdc
targets:
  vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGNaxnNz/iYy2fRs/Djmuol7ulImZ7VC7TT5Bk3BwGmWq64oLauY2G95UIlZ6iWnPCfoq6PbR04vALqE1oX6Ytk=
  vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNwqk6szmmxogenu4l7MSg7SGRNBhT1UHHogU2fAnwMUYSfTJmgevYZNe7IEQUKdDuF4nSx0YJDCKYZLAvwg2rU=
tasks:
- install:
    extra_system_packages:
      deb:
      - s3cmd
      - maven
      rpm:
      - s3cmd
      - maven
- ceph: null
- openssl_keys: null
- rgw:
    client.0: null
- tox:
  - client.0
- workunit:
    clients:
      client.0:
      - rgw/run-bucket-check.sh
- workunit:
    clients:
      client.0:
      - cls/test_cls_lock.sh
      - cls/test_cls_log.sh
      - cls/test_cls_refcount.sh
      - cls/test_cls_rgw.sh
      - cls/test_cls_rgw_gc.sh
      - cls/test_cls_rgw_stats.sh
      - cls/test_cls_cmpomap.sh
      - cls/test_cls_2pc_queue.sh
      - cls/test_cls_user.sh
      - rgw/test_rgw_gc_log.sh
      - rgw/test_rgw_obj.sh
      - rgw/test_librgw_file.sh
      - rgw/test_awssdkv4_sig.sh
- workunit:
    clients:
      client.0:
      - rgw/test_rgw_s3_mp_reupload.sh
- workunit:
    clients:
      client.0:
      - rgw/run-rados-pool-quota.sh
- ragweed:
    client.0:
      default-branch: ceph-squid
      rgw_server: client.0
      stages: prepare,check
- workunit:
    clients:
      client.0:
      - rgw/run-reshard.sh
- s3tests:
    client.0:
      rgw_server: client.0
- workunit:
    clients:
      client.0:
      - rgw/run-versioning.sh
- s3tests-java:
    client.0:
      force-branch: ceph-squid
      force-repo: https://github.com/ceph/java_s3tests.git
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_17:49:15
tube: vps
user: irq0
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-10T18:19:12.243 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_50e98e8318117ef866947b5847d947538c5efcdc/qa; will attempt to use it
2026-03-10T18:19:12.243 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_50e98e8318117ef866947b5847d947538c5efcdc/qa/tasks
2026-03-10T18:19:12.243 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-10T18:19:12.244 INFO:teuthology.task.internal:Saving configuration
2026-03-10T18:19:12.250 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-10T18:19:12.251 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-10T18:19:12.256 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps/1107', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'rocky', 'os_version': '9.7', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 18:18:10.460330', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGNaxnNz/iYy2fRs/Djmuol7ulImZ7VC7TT5Bk3BwGmWq64oLauY2G95UIlZ6iWnPCfoq6PbR04vALqE1oX6Ytk='}
2026-03-10T18:19:12.261 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps/1107', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'rocky', 'os_version': '9.7', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 18:18:10.460817', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNwqk6szmmxogenu4l7MSg7SGRNBhT1UHHogU2fAnwMUYSfTJmgevYZNe7IEQUKdDuF4nSx0YJDCKYZLAvwg2rU='}
2026-03-10T18:19:12.261 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-10T18:19:12.261 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a']
2026-03-10T18:19:12.262 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'node-exporter.b']
2026-03-10T18:19:12.262 INFO:teuthology.run_tasks:Running task console_log...
2026-03-10T18:19:12.266 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding
2026-03-10T18:19:12.271 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding
2026-03-10T18:19:12.271 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f096b048040>, signals=[15])
2026-03-10T18:19:12.271 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-10T18:19:12.272 INFO:teuthology.task.internal:Opening connections...
2026-03-10T18:19:12.272 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local
2026-03-10T18:19:12.272 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T18:19:12.332 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local
2026-03-10T18:19:12.332 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T18:19:12.394 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-10T18:19:12.395 DEBUG:teuthology.orchestra.run.vm01:> uname -m
2026-03-10T18:19:12.450 INFO:teuthology.orchestra.run.vm01.stdout:x86_64
2026-03-10T18:19:12.450 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release
2026-03-10T18:19:12.504 INFO:teuthology.orchestra.run.vm01.stdout:NAME="Rocky Linux"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="9.7 (Blue Onyx)"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:ID="rocky"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE="rhel centos fedora"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="9.7"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:PLATFORM_ID="platform:el9"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="Rocky Linux 9.7 (Blue Onyx)"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:ANSI_COLOR="0;32"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:LOGO="fedora-logo-icon"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:CPE_NAME="cpe:/o:rocky:rocky:9::baseos"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://rockylinux.org/"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:VENDOR_NAME="RESF"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:VENDOR_URL="https://resf.org/"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://bugs.rockylinux.org/"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:SUPPORT_END="2032-05-31"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:ROCKY_SUPPORT_PRODUCT="Rocky-Linux-9"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:ROCKY_SUPPORT_PRODUCT_VERSION="9.7"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT="Rocky Linux"
2026-03-10T18:19:12.505 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="9.7"
2026-03-10T18:19:12.505 INFO:teuthology.lock.ops:Updating vm01.local on lock server
2026-03-10T18:19:12.509 DEBUG:teuthology.orchestra.run.vm04:> uname -m
2026-03-10T18:19:12.526 INFO:teuthology.orchestra.run.vm04.stdout:x86_64
2026-03-10T18:19:12.526 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:NAME="Rocky Linux"
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="9.7 (Blue Onyx)"
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:ID="rocky"
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE="rhel centos fedora"
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="9.7"
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:PLATFORM_ID="platform:el9"
2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="Rocky Linux 9.7 (Blue Onyx)"
Linux 9.7 (Blue Onyx)" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:ANSI_COLOR="0;32" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:LOGO="fedora-logo-icon" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:CPE_NAME="cpe:/o:rocky:rocky:9::baseos" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://rockylinux.org/" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:VENDOR_NAME="RESF" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:VENDOR_URL="https://resf.org/" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://bugs.rockylinux.org/" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:SUPPORT_END="2032-05-31" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:ROCKY_SUPPORT_PRODUCT="Rocky-Linux-9" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:ROCKY_SUPPORT_PRODUCT_VERSION="9.7" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT="Rocky Linux" 2026-03-10T18:19:12.580 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="9.7" 2026-03-10T18:19:12.581 INFO:teuthology.lock.ops:Updating vm04.local on lock server 2026-03-10T18:19:12.621 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-10T18:19:12.623 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-10T18:19:12.624 INFO:teuthology.task.internal:Checking for old test directory... 2026-03-10T18:19:12.624 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest 2026-03-10T18:19:12.625 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest 2026-03-10T18:19:12.639 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-10T18:19:12.640 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-10T18:19:12.640 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph) 2026-03-10T18:19:12.680 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph) 2026-03-10T18:19:12.694 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-10T18:19:12.694 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-10T18:19:12.695 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-10T18:19:12.702 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready 2026-03-10T18:19:12.749 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T18:19:12.988 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready 2026-03-10T18:19:13.003 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T18:19:13.193 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-10T18:19:13.194 INFO:teuthology.task.internal:Creating test directory... 2026-03-10T18:19:13.194 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-10T18:19:13.196 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-10T18:19:13.211 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-10T18:19:13.212 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-10T18:19:13.213 INFO:teuthology.task.internal:Creating archive directory... 
2026-03-10T18:19:13.213 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T18:19:13.253 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T18:19:13.271 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-10T18:19:13.272 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-10T18:19:13.272 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T18:19:13.322 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T18:19:13.322 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T18:19:13.337 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T18:19:13.338 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T18:19:13.364 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T18:19:13.386 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T18:19:13.397 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T18:19:13.403 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T18:19:13.412 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T18:19:13.413 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-10T18:19:13.415 INFO:teuthology.task.internal:Configuring sudo...
2026-03-10T18:19:13.415 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T18:19:13.440 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T18:19:13.485 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-10T18:19:13.489 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-10T18:19:13.489 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T18:19:13.512 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T18:19:13.543 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T18:19:13.594 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T18:19:13.648 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-03-10T18:19:13.648 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T18:19:13.707 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T18:19:13.729 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T18:19:13.787 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-03-10T18:19:13.787 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T18:19:13.845 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart
2026-03-10T18:19:13.847 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart
2026-03-10T18:19:13.874 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T18:19:13.915 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T18:19:14.144 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-10T18:19:14.146 INFO:teuthology.task.internal:Starting timer...
2026-03-10T18:19:14.146 INFO:teuthology.run_tasks:Running task pcp...
2026-03-10T18:19:14.149 INFO:teuthology.run_tasks:Running task selinux...
2026-03-10T18:19:14.150 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:getty_t:s0']}
2026-03-10T18:19:14.151 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported
2026-03-10T18:19:14.151 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported
2026-03-10T18:19:14.151 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-10T18:19:14.151 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-10T18:19:14.151 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-10T18:19:14.151 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-10T18:19:14.152 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-10T18:19:14.152 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-03-10T18:19:14.153 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-03-10T18:19:14.661 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-03-10T18:19:14.666 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-10T18:19:14.666 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryrra5ukbk --limit vm01.local,vm04.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-10T18:22:21.358 INFO:teuthology.task.ansible:Archiving ansible failure log at: /archive/irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps/1107/ansible_failures.yaml
2026-03-10T18:22:21.359 ERROR:teuthology.run_tasks:Saw exception from tasks.
Traceback (most recent call last):
  File "/home/teuthos/teuthology/teuthology/run_tasks.py", line 112, in run_tasks
    manager.__enter__()
  File "/home/teuthos/teuthology/teuthology/task/__init__.py", line 123, in __enter__
    self.begin()
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 490, in begin
    super(CephLab, self).begin()
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 324, in begin
    self.execute_playbook()
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 356, in execute_playbook
    self._handle_failure(command, status)
  File "/home/teuthos/teuthology/teuthology/task/ansible.py", line 384, in _handle_failure
    raise AnsibleFailedError(failures)
teuthology.exceptions.AnsibleFailedError: vm01.local:
  _ansible_no_log: false
  changed: false
  invocation:
    module_args:
      allow_downgrade: false
      allowerasing: false
      autoremove: false
      best: null
      bugfix: false
      cacheonly: false
      conf_file: null
      disable_excludes: null
      disable_gpg_check: false
      disable_plugin: []
      disablerepo: []
      download_dir: null
      download_only: false
      enable_plugin: []
      enablerepo: []
      exclude: []
      install_repoquery: true
      install_weak_deps: true
      installroot: /
      list: null
      lock_timeout: 30
      name:
      - dnf-utils
      - sysstat
      - libedit
      - boost-thread
      - xfsprogs
      - gdisk
      - parted
      - libgcrypt
      - fuse-libs
      - openssl
      - libuuid
      - podman
      - attr
      - lsof
      - gettext
      - bc
      - xfsdump
      - blktrace
      - usbredir
      - valgrind
      - nfs-utils
      - ncurses-devel
      - gcc
      - git
      - genisoimage
      - qemu-img
      - qemu-kvm-core
      - qemu-kvm-block-rbd
      - libacl-devel
      - lvm2
      - autoconf
      - gdb
      - iozone
      nobest: null
      releasever: null
      security: false
      skip_broken: false
      sslverify: true
      state: present
      update_cache: false
      update_only: false
      use_backend: auto
      validate_certs: true
  msg: 'Failed to download packages: podman-6:5.6.0-14.el9_7.x86_64: Cannot download, all mirrors were already tried without success'
  results: []
2026-03-10T18:22:21.359 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-10T18:22:21.361 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-10T18:22:21.361 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-10T18:22:21.362 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-10T18:22:21.364 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-10T18:22:21.366 INFO:teuthology.task.internal:Duration was 187.219525 seconds
2026-03-10T18:22:21.366 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-10T18:22:21.367 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-10T18:22:21.368 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T18:22:21.369 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T18:22:21.425 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T18:22:21.425 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T18:22:31.006 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-10T18:22:31.006 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local
2026-03-10T18:22:31.006 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T18:22:31.053 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local
2026-03-10T18:22:31.054 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T18:22:31.091 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-10T18:22:31.091 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T18:22:31.093 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T18:22:31.437 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-10T18:22:31.437 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T18:22:31.438 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T18:22:31.471 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T18:22:31.472 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T18:22:31.472 INFO:teuthology.orchestra.run.vm01.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T18:22:31.472 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T18:22:31.472 INFO:teuthology.orchestra.run.vm01.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T18:22:31.479 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T18:22:31.479 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T18:22:31.480 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T18:22:31.480 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T18:22:31.480 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T18:22:31.565 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.4% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T18:22:31.580 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.4% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T18:22:31.582 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-10T18:22:31.584 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-10T18:22:31.584 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T18:22:31.640 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T18:22:31.670 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-10T18:22:31.672 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T18:22:31.684 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T18:22:31.714 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core
2026-03-10T18:22:31.740 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core
2026-03-10T18:22:31.757 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T18:22:31.792 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T18:22:31.793 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T18:22:31.816 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T18:22:31.816 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-10T18:22:31.818 INFO:teuthology.task.internal:Transferring archived files...
2026-03-10T18:22:31.818 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps/1107/remote/vm01
2026-03-10T18:22:31.818 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T18:22:31.869 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-10_17:49:15-rgw:verify-cobaltcore-storage-v19.2.3-fasttrack-8-none-default-vps/1107/remote/vm04
2026-03-10T18:22:31.869 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T18:22:31.902 INFO:teuthology.task.internal:Removing archive directory...
2026-03-10T18:22:31.902 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T18:22:31.912 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T18:22:31.968 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-10T18:22:31.970 INFO:teuthology.task.internal:Not uploading archives.
2026-03-10T18:22:31.970 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-10T18:22:31.972 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-10T18:22:31.972 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T18:22:31.974 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T18:22:31.996 INFO:teuthology.orchestra.run.vm01.stdout: 83886506 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 18:22 /home/ubuntu/cephtest
2026-03-10T18:22:32.029 INFO:teuthology.orchestra.run.vm04.stdout: 83886251 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 18:22 /home/ubuntu/cephtest
2026-03-10T18:22:32.030 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-10T18:22:32.036 INFO:teuthology.run:Summary data:
description: rgw:verify/{0-install accounts$/{none} clusters/fixed-2 datacache/no_datacache frontend/beast ignore-pg-availability inline-data$/{on} msgr-failures/few objectstore/bluestore-bitmap overrides proto/https rgw_pool_type/replicated s3tests-branch sharding$/{default} striping$/{stripe-greater-than-chunk} supported-random-distro$/{rocky_latest} tasks/{bucket-check cls mp_reupload rados-pool-quota ragweed reshard s3tests versioning zzz-s3tests-java} validater/lockdep}
duration: 187.21952509880066
failure_reason: 'vm01.local: _ansible_no_log: false changed: false invocation: module_args: allow_downgrade: false allowerasing: false autoremove: false best: null bugfix: false cacheonly: false conf_file: null disable_excludes: null disable_gpg_check: false disable_plugin: [] disablerepo: [] download_dir: null download_only: false enable_plugin: [] enablerepo: [] exclude: [] install_repoquery: true install_weak_deps: true installroot: / list: null lock_timeout: 30 name: - dnf-utils - sysstat - libedit - boost-thread - xfsprogs - gdisk - parted - libgcrypt - fuse-libs - openssl - libuuid - podman - attr - lsof - gettext - bc - xfsdump - blktrace - usbredir - valgrind - nfs-utils - ncurses-devel - gcc - git - genisoimage - qemu-img - qemu-kvm-core - qemu-kvm-block-rbd - libacl-devel - lvm2 - autoconf - gdb - iozone nobest: null releasever: null security: false skip_broken: false sslverify: true state: present update_cache: false update_only: false use_backend: auto validate_certs: true msg: ''Failed to download packages: podman-6:5.6.0-14.el9_7.x86_64: Cannot download, all mirrors were already tried without success'' results: []'
owner: irq0
sentry_event: null
status: dead
success: false
2026-03-10T18:22:32.036 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T18:22:32.059 INFO:teuthology.run:DEAD