Nodes: vm03 vm06

Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/deploy-raw}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps/4211/teuthology.log

Failure Reason:

"grep: /var/log/ceph/b40e4dc8-2c99-11f1-a594-afe693e646af/ceph.log: No such file or directory" in cluster log

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps/4211/teuthology.log
  • archive_path: /archive/kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps/4211
  • description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/deploy-raw}
  • duration: 0:01:47
  • email: (Empty)
  • failure_reason: "grep: /var/log/ceph/b40e4dc8-2c99-11f1-a594-afe693e646af/ceph.log: No such file or directory" in cluster log
  • flavor: default
  • job_id: 4211
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: tentacle
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • logical_volumes:
          • lv_1:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_2:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_3:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_4:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
        • timezone: UTC
        • volume_groups:
          • vg_nvme:
            • pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd mclock iops capacity threshold hdd: 49000
          • osd shutdown pgref assert: True
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • OSD_DOWN
        • CEPHADM_FAILED_DAEMON
        • but is still running
        • PG_DEGRADED
      • log-only-match:
        • CEPHADM_
      • sha1: 70f8415b300f041766fa27faf7d5472699e32388
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
      • raw-osds: True
    • install:
      • ceph:
        • flavor: default
        • sha1: 70f8415b300f041766fa27faf7d5472699e32388
      • extra_system_packages:
        • deb:
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: tt-tentacle
      • sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • owner: kyr
  • pid: 595635
  • roles:
    • ['host.a', 'client.0']
    • ['host.b', 'client.1']
  • sentry_event: (Empty)
  • status: fail
  • success: False
  • branch: tentacle
  • seed: 2483
  • sha1: 70f8415b300f041766fa27faf7d5472699e32388
  • subset: 1/100000
  • suite: rados
  • suite_branch: tt-tentacle
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_0392f78529848ec72469e8e431875cb98d3a5fb4/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • targets:
    • vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMhNScVnLZML9HcTb098SjSxT5fE+2l+iEKKURo3xCyn1i7aw2z5j/JyDS5Fn5wswphJEiqWtJTjD0gh2ZJBMGs=
    • vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOsBSbDhwyUniUj7Px7OWb7YSfD8VA6tmYWyMePU2MzuTwnQVw4qqKpwe1atdHoo8QrAFRXp+XwBbJh+4qHXBsA=
  • tasks:
      • internal.check_packages
      • internal.buildpackages_prep
      • internal.save_config
      • internal.check_lock
      • internal.add_remotes
      • console_log
      • internal.connect
      • internal.push_inventory
      • internal.serialize_remote_roles
      • internal.check_conflict
      • internal.check_ceph_data
      • internal.vm_setup
      • internal.base
      • internal.archive_upload
      • internal.archive
      • internal.coredump
      • internal.sudo
      • internal.syslog
      • internal.timer
      • pcp
      • selinux
      • ansible.cephlab
      • clock
      • pexec:
        • all:
          • sudo dnf remove nvme-cli -y
          • sudo dnf install runc nvmetcli nvme-cli -y
          • sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
          • sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
      • nvme_loop
      • cephadm:
        • roleless: True
        • conf:
          • mgr:
            • debug mgr: 20
            • debug ms: 1
          • mon:
            • debug mon: 20
            • debug ms: 1
            • debug paxos: 20
          • osd:
            • debug ms: 1
            • debug osd: 20
            • osd mclock iops capacity threshold hdd: 49000
            • osd shutdown pgref assert: True
        • flavor: default
        • log-ignorelist:
          • \(MDS_ALL_DOWN\)
          • \(MDS_UP_LESS_THAN_MAX\)
          • OSD_DOWN
          • CEPHADM_FAILED_DAEMON
          • but is still running
          • PG_DEGRADED
        • log-only-match:
          • CEPHADM_
        • sha1: 70f8415b300f041766fa27faf7d5472699e32388
        • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
        • raw-osds: True
        • cluster: ceph
        • cephadm_mode: root
      • cephadm.shell:
        • host.a:
          • ceph orch status
          • ceph orch ps
          • ceph orch ls
          • ceph orch host ls
          • ceph orch device ls
          • ceph orch ls | grep '^osd.all-available-devices '
      • cephadm.shell:
        • host.a:
          • |
            set -e
            set -x
            ceph orch ps
            ceph orch device ls
            ceph osd tree
            ORCH_PS=$(ceph orch ps)
            if grep -q "No daemons" <<< "$ORCH_PS"; then
              echo "No OSDs were deployed"
              exit 1
            fi
            ceph orch ps | grep -q "running"
            if grep -q "failed" <<< "$ORCH_PS"; then
              echo "At least one raw OSD deployed is failed"
              exit 1
            fi
            if grep -q "stopped" <<< "$ORCH_PS"; then
              echo "At least one raw OSD deployed is stopped"
              exit 1
            fi
            if ceph-volume lvm list; then
              echo "ceph-volume lvm list was expected to give non-zero rc with all raw OSDs"
              exit 1
            fi
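The pexec step in the task list above switches podman's OCI runtime from crun to runc by editing /usr/share/containers/containers.conf in place. A quick way to confirm the edit took effect, sketched under the assumption that the stock file ships a commented `#runtime = "crun"` line:

    # After the two sed edits, only the runc line should be active:
    grep -E '^#?runtime *=' /usr/share/containers/containers.conf
    # Expected, roughly:
    #   runtime = "runc"
    #   #runtime = "crun"
    # Cross-check with podman itself (template path may vary by version):
    podman info --format '{{.Host.OCIRuntime.Name}}'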
  • timestamp: 2026-03-30 19:18:25
  • teuthology_branch: uv2
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-30 19:20:11
  • started: 2026-03-31 00:33:41
  • updated: 2026-03-31 00:37:09
  • status_class: danger
  • runtime: 0:03:28
  • wait_time: 0:01:41
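A closing note on the deploy-raw flavor: with raw-osds: True the OSDs are prepared with ceph-volume's raw mode instead of LVM, which is why the final cephadm.shell check above treats a successful `ceph-volume lvm list` as a failure. The raw-mode counterpart, sketched on the assumption that the host is managed by cephadm:

    # Raw OSDs carry no LVM metadata, so lvm list should exit non-zero:
    sudo cephadm ceph-volume lvm list || echo "no LVM OSDs (expected here)"
    # Their devices are reported by the raw subcommand instead:
    sudo cephadm ceph-volume raw list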