Nodes: vm02 vm06

Description: rados/upgrade/parallel/{0-random-distro$/{centos_9.stream} 0-start 1-tasks mon_election/classic overrides/ignorelist_health upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps/4016/teuthology.log

Failure Reason:

"grep: /var/log/ceph/df61b140-2c71-11f1-9377-cdfbe4415398/ceph.log: No such file or directory" in cluster log

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps/4016/teuthology.log
  • archive_path: /archive/kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps/4016
  • description: rados/upgrade/parallel/{0-random-distro$/{centos_9.stream} 0-start 1-tasks mon_election/classic overrides/ignorelist_health upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
  • duration: 0:03:32
  • email: (Empty)
  • failure_reason: "grep: /var/log/ceph/df61b140-2c71-11f1-9377-cdfbe4415398/ceph.log: No such file or directory" in cluster log
  • flavor: default
  • job_id: 4016
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-30_19:18:25-rados-tentacle-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: tentacle
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • logical_volumes:
          • lv_1:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_2:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_3:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_4:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
        • timezone: UTC
        • volume_groups:
          • vg_nvme:
            • pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 1
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd mclock iops capacity threshold hdd: 49000
          • osd shutdown pgref assert: True
      • create_rbd_pool: True
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • do not have an application enabled
        • application not enabled
        • or freeform for custom applications
        • POOL_APP_NOT_ENABLED
        • is down
        • OSD_DOWN
        • mons down
        • mon down
        • MON_DOWN
        • out of quorum
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
        • pg .* is stuck inactive
        • pg .* is stuck peering
        • pg .* is .*degraded
        • FS_DEGRADED
        • OSDMAP_FLAGS
        • OSD_UPGRADE_FINISHED
        • CEPHADM_FAILED_DAEMON
        • pg .* is stuck undersized
        • HEALTH_WARN .* osds down
        • MDS_INSUFFICIENT_STANDBY
        • POOL_FULL
        • Telemetry requires re-opt-in
        • telemetry module includes new collections
        • MDS_ALL_DOWN
        • MDS_UP_LESS_THAN_MAX
        • OSD_SLOW_PING_TIME
        • reached quota
        • running out of quota
        • overall HEALTH_
        • CACHE_POOL_NO_HIT_SET
        • pool\(s\) full
        • POOL_FULL
        • SMALLER_PGP_NUM
        • SLOW_OPS
        • CACHE_POOL_NEAR_FULL
        • OBJECT_MISPLACED
        • slow request
        • noscrub
        • nodeep-scrub
      • sha1: 70f8415b300f041766fa27faf7d5472699e32388
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    • install:
      • ceph:
        • extra_system_packages:
          • python3-pytest
        • flavor: default
        • sha1: 70f8415b300f041766fa27faf7d5472699e32388
      • extra_system_packages:
        • deb:
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
        • scontext=system_u:system_r:getty_t:s0
    • workunit:
      • branch: tt-tentacle
      • sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • owner: kyr
  • pid: 312665
  • roles:
    • ['mon.a', 'mon.c', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a']
    • ['mon.b', 'mds.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b']
  • sentry_event: (Empty)
  • status: fail
  • success: False
  • branch: tentacle
  • seed: 2483
  • sha1: 70f8415b300f041766fa27faf7d5472699e32388
  • subset: 1/100000
  • suite: rados
  • suite_branch: tt-tentacle
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_0392f78529848ec72469e8e431875cb98d3a5fb4/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • targets:
    • vm02.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJ44fMo67gAtBUKDfihjPYnzwyXo2poBmH+ZGgyeeF6mvnEA3yfaTIga7MS052LGfz/MD6iQYrypZ3NJmbz8Lec=
    • vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBqbw+r8UTYwUuiSGQOmvwKSePUAYyICQNN5//Ho0/VvLKnP+Oq5/dtCmSVxTsXGs570drgZGQ5Ej12dL4Xb6Ps=
  • tasks:
      • internal.check_packages
      • internal.buildpackages_prep
      • internal.save_config
      • internal.check_lock
      • internal.add_remotes
      • console_log
      • internal.connect
      • internal.push_inventory
      • internal.serialize_remote_roles
      • internal.check_conflict
      • internal.check_ceph_data
      • internal.vm_setup
      • internal.base
      • internal.archive_upload
      • internal.archive
      • internal.coredump
      • internal.sudo
      • internal.syslog
      • internal.timer
      • pcp
      • selinux
      • ansible.cephlab
      • clock
      • pexec:
        • all:
          • sudo dnf remove nvme-cli -y
          • sudo dnf install nvmetcli nvme-cli -y
      • install:
        • branch: reef
        • exclude_packages:
          • ceph-volume
        • extra_system_packages:
          • deb:
            • python3-pytest
            • python3-jmespath
            • python3-xmltodict
            • s3cmd
          • rpm:
            • python3-pytest
            • bzip2
            • perl-Test-Harness
            • python3-jmespath
            • python3-xmltodict
            • s3cmd
        • flavor: default
        • sha1: 70f8415b300f041766fa27faf7d5472699e32388
      • print: **** done install task...
      • print: **** done start installing reef cephadm ...
      • cephadm:
        • compiled_cephadm_branch: reef
        • conf:
          • osd:
            • osd_class_default_list: "*"
            • osd_class_load_list: "*"
            • debug ms: 1
            • debug osd: 20
            • osd mclock iops capacity threshold hdd: 49000
            • osd shutdown pgref assert: True
          • global:
            • mon election default strategy: 1
          • mgr:
            • debug mgr: 20
            • debug ms: 1
          • mon:
            • debug mon: 20
            • debug ms: 1
            • debug paxos: 20
        • image: quay.ceph.io/ceph-ci/ceph:reef
        • create_rbd_pool: True
        • flavor: default
        • log-ignorelist:
          • \(MDS_ALL_DOWN\)
          • \(MDS_UP_LESS_THAN_MAX\)
          • do not have an application enabled
          • application not enabled
          • or freeform for custom applications
          • POOL_APP_NOT_ENABLED
          • is down
          • OSD_DOWN
          • mons down
          • mon down
          • MON_DOWN
          • out of quorum
          • PG_AVAILABILITY
          • PG_DEGRADED
          • Reduced data availability
          • Degraded data redundancy
          • pg .* is stuck inactive
          • pg .* is stuck peering
          • pg .* is .*degraded
          • FS_DEGRADED
          • OSDMAP_FLAGS
          • OSD_UPGRADE_FINISHED
          • CEPHADM_FAILED_DAEMON
          • pg .* is stuck undersized
          • HEALTH_WARN .* osds down
          • MDS_INSUFFICIENT_STANDBY
          • POOL_FULL
          • Telemetry requires re-opt-in
          • telemetry module includes new collections
          • MDS_ALL_DOWN
          • MDS_UP_LESS_THAN_MAX
          • OSD_SLOW_PING_TIME
          • reached quota
          • running out of quota
          • overall HEALTH_
          • CACHE_POOL_NO_HIT_SET
          • pool\(s\) full
          • POOL_FULL
          • SMALLER_PGP_NUM
          • SLOW_OPS
          • CACHE_POOL_NEAR_FULL
          • OBJECT_MISPLACED
          • slow request
          • noscrub
          • nodeep-scrub
        • sha1: 70f8415b300f041766fa27faf7d5472699e32388
        • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
        • cluster: ceph
        • cephadm_mode: root
      • print: **** done end installing reef cephadm ...
      • print: **** done start cephadm.shell ceph config set mgr...
      • cephadm.shell:
        • mon.a:
          • ceph config set mgr mgr/cephadm/use_repo_digest true --force
      • print: **** done cephadm.shell ceph config set mgr...
      • print: **** done start telemetry reef...
      • workunit:
        • clients:
          • client.0:
            • test_telemetry_reef.sh
      • print: **** done end telemetry reef...
      • print: **** done start parallel
      • parallel:
        • workload
        • upgrade-sequence
      • print: **** done end parallel
      • print: **** done start telemetry x...
      • workunit:
        • clients:
          • client.0:
            • test_telemetry_reef_x.sh
      • print: **** done end telemetry x...
  • timestamp: 2026-03-30 19:18:25
  • teuthology_branch: uv2
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-30 19:18:53
  • started: 2026-03-30 19:47:49
  • updated: 2026-03-30 19:52:58
  • status_class: danger
  • runtime: 0:05:09
  • wait_time: 0:01:37