Nodes: vm08

Description: rados/singleton/{all/backfill-toofull mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/{bluestore/{alloc$/{avl} base mem$/{normal-1} onode-segment$/{512K} write$/{v1/{compr$/{no$/{no}} v1}}}} rados supported-random-distro$/{centos_latest}}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4301/teuthology.log

Failure Reason: hit max job timeout
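A "hit max job timeout" failure typically means the teuthology supervisor killed the job for exceeding its allowed runtime, rather than a test assertion failing; that is why the job below ends as status: dead after a 2:03:22 runtime. When triaging, the first question is where the job went quiet. The sketch below is a generic triage aid, not part of the job: it assumes teuthology.log lines carry the usual YYYY-MM-DDTHH:MM:SS timestamp prefix and that the log has been downloaded locally as teuthology.log.

    # Triage sketch: find the longest silence between consecutive log
    # timestamps, which usually brackets the operation the job hung on.
    import re
    from datetime import datetime

    TS = re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})")

    def largest_gap(path):
        prev, worst = None, (0.0, None, None)
        with open(path, errors="replace") as f:
            for line in f:
                m = TS.match(line)
                if not m:
                    continue
                ts = datetime.strptime(m.group(1), "%Y-%m-%dT%H:%M:%S")
                if prev is not None and (ts - prev).total_seconds() > worst[0]:
                    worst = ((ts - prev).total_seconds(), prev, ts)
                prev = ts
        return worst

    if __name__ == "__main__":
        gap, start, end = largest_gap("teuthology.log")
        print(f"longest silence: {gap:.0f}s ({start} -> {end})")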

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4301/teuthology.log
  • archive_path: /archive/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4301
  • description: rados/singleton/{all/backfill-toofull mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/{bluestore/{alloc$/{avl} base mem$/{normal-1} onode-segment$/{512K} write$/{v1/{compr$/{no$/{no}} v1}}}} rados supported-random-distro$/{centos_latest}}
  • duration: (Empty)
  • email: (Empty)
  • failure_reason: hit max job timeout
  • flavor: default
  • job_id: 4301
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: tentacle
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • logical_volumes:
          • lv_1:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_2:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_3:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_4:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
        • timezone: UTC
        • volume_groups:
          • vg_nvme:
            • pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
    • ceph:
      • conf:
        • global:
          • mon client directed command retry: 5
          • mon client hunt interval max multiple: 2
          • mon election default strategy: 3
          • mon mgr beacon grace: 90
          • ms bind msgr1: False
          • ms bind msgr2: True
          • ms inject socket failures: 1000
          • ms type: async
        • mgr:
          • debug mgr: 20
          • debug monc: 10
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon scrub interval: 300
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: avl
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • bluestore onode segment size: 512K
          • bluestore write v2: False
          • bluestore zero block detection: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd debug verify cached snaps: True
          • osd debug verify missing on start: True
          • osd failsafe full ratio: 0.95
          • osd mclock iops capacity threshold hdd: 49000
          • osd mclock override recovery settings: True
          • osd mclock profile: high_recovery_ops
          • osd mclock skip benchmark: True
          • osd objectstore: bluestore
          • osd op queue: debug_random
          • osd op queue cut off: debug_random
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(OSD_SLOW_PING_TIME
        • \(MON_DOWN\)
      • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    • install:
      • ceph:
        • flavor: default
        • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
      • extra_system_packages:
        • deb:
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: tt-tentacle
      • sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • owner: kyr
  • pid: 825942
  • roles:
    • ['mon.a', 'mon.b', 'mon.c', 'mgr.x', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
  • sentry_event: (Empty)
  • status: dead
  • success: (Empty)
  • branch: tentacle
  • seed: 6407
  • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
  • subset: 1/100000
  • suite: rados
  • suite_branch: tt-tentacle
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_0392f78529848ec72469e8e431875cb98d3a5fb4/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • targets:
    • vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIgKNLIxE+Lj1voHUWXKiezpgoF+4nYWgfb18S/R+FGwoFC0iS9Sf+xYzcGerXTa4ld4jV8lWnREyyD4PnKfET4=
  • tasks:
      • install
      • ceph:
        • conf:
          • osd:
            • osd max pg log entries: 5
            • osd min pg log entries: 5
        • create_rbd_pool: False
        • log-ignorelist:
          • Error
          • overall HEALTH_
          • \(OBJECT_
          • \(OSDMAP_FLAGS\)
          • \(OSD_
          • \(PG_
          • \(POOL_BACKFILLFULL\)
          • \(POOL_NEARFULL\)
          • \(SLOW_OPS\)
          • \(TOO_FEW_PGS\)
          • Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running
          • slow request
          • \(POOL_APP_NOT_ENABLED\)
        • pre-mgr-commands:
          • sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
      • backfill_toofull
  • timestamp: 2026-03-31 11:18:10
  • teuthology_branch: uv2
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-31 11:18:34
  • started: 2026-03-31 11:28:49
  • updated: 2026-03-31 13:32:11
  • status_class: danger
  • runtime: 2:03:22
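
For reference, the ansible.cephlab vars above carve one volume group, vg_nvme, out of four PVs and split it into four 25%VG scratch LVs. Below is a minimal sketch of the equivalent LVM commands, assuming standard LVM2 tooling; it is an illustration, not what ceph-cm-ansible literally runs.

    # Emit LVM commands matching the logical_volumes/volume_groups vars.
    # vgcreate/lvcreate and the -l 25%VG extent syntax are standard LVM2.
    pvs = ["/dev/vdb", "/dev/vdc", "/dev/vdd", "/dev/vde"]
    vg = "vg_nvme"

    print("vgcreate", vg, *pvs)
    for lv in ("lv_1", "lv_2", "lv_3", "lv_4"):
        # each scratch LV gets a quarter of the VG's extents
        print(f"lvcreate -l 25%VG -n {lv} {vg}")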
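The osd overrides are also worth a second look given the test: backfill-toofull exercises the fullness ladder, and the job pins it at nearfull 0.8 < backfillfull 0.85 < full 0.9 < failsafe 0.95. That ordering is what lets the task drive an OSD past backfillfull (so backfill is held off) without tripping the failsafe. Here is a small sanity-check sketch using the values copied from the overrides above; note that Ceph treats spaces and underscores in option names interchangeably, which is why the dump can mix "mon osd backfillfull_ratio" with "mon osd full ratio".

    # Verify the fullness thresholds are in ascending order, then render
    # them as a ceph.conf-style [osd] section for comparison with the dump.
    ratios = {
        "mon osd nearfull ratio": 0.8,
        "mon osd backfillfull_ratio": 0.85,
        "mon osd full ratio": 0.9,
        "osd failsafe full ratio": 0.95,
    }

    values = list(ratios.values())
    assert values == sorted(values), "fullness thresholds out of order"

    print("[osd]")
    for key, val in ratios.items():
        print(f"{key} = {val}")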