Nodes: vm02 vm06 vm07 vm09

Description: rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-4} ec_optimizations/ec_optimizations_on mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/{bluestore/{alloc$/{hybrid} base mem$/{low} onode-segment$/{512K-onoff} write$/{v1/{compr$/{yes$/{snappy}} v1}}}} rados recovery-overrides/{default} supported-random-distro$/{centos_latest} thrashers/careful_host thrashosds-health workloads/ec-rados-plugin=isa-k=6-m=3}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4308/teuthology.log

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4308/teuthology.log
  • archive_path: /archive/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4308
  • description: rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-4} ec_optimizations/ec_optimizations_on mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/{bluestore/{alloc$/{hybrid} base mem$/{low} onode-segment$/{512K-onoff} write$/{v1/{compr$/{yes$/{snappy}} v1}}}} rados recovery-overrides/{default} supported-random-distro$/{centos_latest} thrashers/careful_host thrashosds-health workloads/ec-rados-plugin=isa-k=6-m=3}
  • duration: 0:24:28
  • email: (Empty)
  • failure_reason: (Empty)
  • flavor: default
  • job_id: 4308
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: tentacle
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • logical_volumes:
          • lv_1:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_2:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_3:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_4:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
        • timezone: UTC
        • volume_groups:
          • vg_nvme:
            • pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
    • ceph:
      • conf:
        • global:
          • enable experimental unrecoverable data corrupting features: *
          • mon election default strategy: 3
          • osd debug inject dispatch delay duration: 0.1
          • osd debug inject dispatch delay probability: 0.1
          • osd_pool_default_flag_ec_optimizations: True
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon min osdmap epochs: 50
          • mon osdmap full prune interval: 2
          • mon osdmap full prune min: 15
          • mon osdmap full prune txsize: 2
          • mon scrub interval: 300
          • paxos service trim min: 10
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluefs allocator: hybrid
          • bluestore allocator: hybrid
          • bluestore block size: 96636764160
          • bluestore compression algorithm: snappy
          • bluestore compression mode: aggressive
          • bluestore debug onode segmentation random: True
          • bluestore fsck on mount: True
          • bluestore onode segment size: 512K
          • bluestore write v2: False
          • bluestore zero block detection: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd blocked scrub grace period: 3600
          • osd debug reject backfill probability: 0.3
          • osd debug verify cached snaps: True
          • osd debug verify missing on start: True
          • osd failsafe full ratio: 0.95
          • osd max backfills: 3
          • osd max markdown count: 1000
          • osd mclock iops capacity threshold hdd: 49000
          • osd mclock override recovery settings: True
          • osd mclock profile: high_recovery_ops
          • osd mclock skip benchmark: True
          • osd memory target: 1610612736
          • osd objectstore: bluestore
          • osd op queue: debug_random
          • osd op queue cut off: debug_random
          • osd scrub max interval: 120
          • osd scrub min interval: 60
          • osd shutdown pgref assert: True
          • osd snap trim sleep: 2
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • but it is still running
        • objects unfound and apparently lost
        • overall HEALTH_
        • \(OSDMAP_FLAGS\)
        • \(OSD_
        • \(PG_
        • \(POOL_
        • \(CACHE_POOL_
        • \(SMALLER_PGP_NUM\)
        • \(OBJECT_
        • SLOW_OPS
        • \(REQUEST_SLOW\)
        • \(TOO_FEW_PGS\)
        • slow request
        • timeout on replica
        • late reservation from
        • MON_DOWN
        • OSDMAP_FLAGS
        • OSD_DOWN
        • PG_DEGRADED
        • PG_AVAILABILITY
        • POOL_APP_NOT_ENABLED
        • mons down
        • mon down
        • out of quorum
        • noscrub
        • nodeep-scrub
        • Degraded data redundancy
        • is down
        • osds down
        • pg .*? is .*?degraded.*?, acting
        • pg .*? is stuck
        • pg degraded
        • PG_BACKFILL_FULL
        • Low space hindering backfill .*? backfill_toofull
        • OSD_HOST_DOWN
        • OSD_ROOT_DOWN
      • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon: (Empty)
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    • install:
      • ceph:
        • flavor: default
        • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
      • extra_system_packages:
        • deb:
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: tt-tentacle
      • sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
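
Note: everything under overrides is deep-merged into the task stanza of the same name before the run, with the override values taking precedence; that is why the expanded ceph task under tasks below repeats all of these settings plus its own additions (e.g. "debug monc: 20"). A minimal illustrative merge in Python (the real helper is teuthology's deep_merge; this sketch only demonstrates the effect):

    def deep_merge(base, override):
        """Recursively merge `override` into `base`; override values win."""
        if isinstance(base, dict) and isinstance(override, dict):
            merged = dict(base)
            for key, value in override.items():
                merged[key] = deep_merge(merged.get(key), value)
            return merged
        return base if override is None else override

    # Hypothetical fragments mirroring this job: the scheduled ceph task
    # sets only "debug monc"; overrides['ceph'] supplies everything else.
    task_conf = {"conf": {"osd": {"debug monc": 20}}}
    override_conf = {"conf": {"osd": {"debug osd": 20, "debug ms": 1}}}
    assert deep_merge(task_conf, override_conf) == {
        "conf": {"osd": {"debug monc": 20, "debug osd": 20, "debug ms": 1}}
    }
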
  • owner: kyr
  • pid: 866137
  • roles:
    • ['mon.a', 'mgr.y', 'osd.0', 'osd.4', 'osd.8', 'osd.12']
    • ['mon.b', 'osd.1', 'osd.5', 'osd.9', 'osd.13']
    • ['mon.c', 'osd.2', 'osd.6', 'osd.10', 'osd.14']
    • ['mgr.x', 'osd.3', 'osd.7', 'osd.11', 'osd.15', 'client.0']
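
Note: each sub-list under roles is one node's role set, paired positionally with the targets recorded below (vm02, vm06, vm07, vm09): the fixed-4 layout of 3 mons, 2 mgrs, one client, and 16 OSDs at four per host, which gives the careful_host thrasher (thrash_hosts: True) whole hosts to take down. A sketch of the pairing, assuming that positional correspondence:

    # Sketch: pair each roles sub-list with its node, assuming the
    # positional correspondence between the roles and targets lists.
    nodes = ["vm02.local", "vm06.local", "vm07.local", "vm09.local"]
    roles = [
        ["mon.a", "mgr.y", "osd.0", "osd.4", "osd.8", "osd.12"],
        ["mon.b", "osd.1", "osd.5", "osd.9", "osd.13"],
        ["mon.c", "osd.2", "osd.6", "osd.10", "osd.14"],
        ["mgr.x", "osd.3", "osd.7", "osd.11", "osd.15", "client.0"],
    ]
    for node, node_roles in zip(nodes, roles):
        print(f"{node}: {', '.join(node_roles)}")
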
  • sentry_event: (Empty)
  • status: pass
  • success: True
  • branch: tentacle
  • seed: 6407
  • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
  • subset: 1/100000
  • suite: rados
  • suite_branch: tt-tentacle
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_0392f78529848ec72469e8e431875cb98d3a5fb4/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • targets:
    • vm02.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCRTlBbVx2YtfFjcWTClPoqnJX1OuGTqNU9+fsD6TGOI9qbn8Pc00Vh137yVHHwfoUejKAxXQg07SMkLsJnAct4=
    • vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIVepMwrEvFi2YYh4Ei+cemPU8Mf6+GZg1rCkWbSWsXV1jjknPNilWADVJwblNAgrtXJFnYRIb8PV3MFmJH7qpw=
    • vm07.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN0/EI3yHtXPl+JtVh7FXfs81rkJ+SJA/aG6FcsuAFfprd5FPgKx+9zaHOASnA6/E5J6v4gwa0BeuXYbDOB+sTI=
    • vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEY+AEYpBGlTuWbdtzZzNrGWntgBJWFFZRVZx30TqXC31miDC1YISmYvTibhR1hVbY3P/bohEE+navPx84P7Ots=
  • tasks:
      • internal.check_packages
      • internal.buildpackages_prep
      • internal.save_config
      • internal.check_lock
      • internal.add_remotes
      • console_log
      • internal.connect
      • internal.push_inventory
      • internal.serialize_remote_roles
      • internal.check_conflict
      • internal.check_ceph_data
      • internal.vm_setup
      • internal.base
      • internal.archive_upload
      • internal.archive
      • internal.coredump
      • internal.sudo
      • internal.syslog
      • internal.timer
      • pcp
      • selinux
      • ansible.cephlab
      • clock
      • install
      • ceph:
        • conf:
          • osd:
            • debug monc: 20
            • bdev async discard: True
            • bdev enable discard: True
            • bluefs allocator: hybrid
            • bluestore allocator: hybrid
            • bluestore block size: 96636764160
            • bluestore compression algorithm: snappy
            • bluestore compression mode: aggressive
            • bluestore debug onode segmentation random: True
            • bluestore fsck on mount: True
            • bluestore onode segment size: 512K
            • bluestore write v2: False
            • bluestore zero block detection: True
            • debug bluefs: 20
            • debug bluestore: 20
            • debug ms: 1
            • debug osd: 20
            • debug rocksdb: 10
            • mon osd backfillfull_ratio: 0.85
            • mon osd full ratio: 0.9
            • mon osd nearfull ratio: 0.8
            • osd blocked scrub grace period: 3600
            • osd debug reject backfill probability: 0.3
            • osd debug verify cached snaps: True
            • osd debug verify missing on start: True
            • osd failsafe full ratio: 0.95
            • osd max backfills: 3
            • osd max markdown count: 1000
            • osd mclock iops capacity threshold hdd: 49000
            • osd mclock override recovery settings: True
            • osd mclock profile: high_recovery_ops
            • osd mclock skip benchmark: True
            • osd memory target: 1610612736
            • osd objectstore: bluestore
            • osd op queue: debug_random
            • osd op queue cut off: debug_random
            • osd scrub max interval: 120
            • osd scrub min interval: 60
            • osd shutdown pgref assert: True
            • osd snap trim sleep: 2
          • global:
            • enable experimental unrecoverable data corrupting features: *
            • mon election default strategy: 3
            • osd debug inject dispatch delay duration: 0.1
            • osd debug inject dispatch delay probability: 0.1
            • osd_pool_default_flag_ec_optimizations: True
          • mgr:
            • debug mgr: 20
            • debug ms: 1
          • mon:
            • debug mon: 20
            • debug ms: 1
            • debug paxos: 20
            • mon min osdmap epochs: 50
            • mon osdmap full prune interval: 2
            • mon osdmap full prune min: 15
            • mon osdmap full prune txsize: 2
            • mon scrub interval: 300
            • paxos service trim min: 10
        • flavor: default
        • fs: xfs
        • log-ignorelist:
          • \(MDS_ALL_DOWN\)
          • \(MDS_UP_LESS_THAN_MAX\)
          • but it is still running
          • objects unfound and apparently lost
          • overall HEALTH_
          • \(OSDMAP_FLAGS\)
          • \(OSD_
          • \(PG_
          • \(POOL_
          • \(CACHE_POOL_
          • \(SMALLER_PGP_NUM\)
          • \(OBJECT_
          • SLOW_OPS
          • \(REQUEST_SLOW\)
          • \(TOO_FEW_PGS\)
          • slow request
          • timeout on replica
          • late reservation from
          • MON_DOWN
          • OSDMAP_FLAGS
          • OSD_DOWN
          • PG_DEGRADED
          • PG_AVAILABILITY
          • POOL_APP_NOT_ENABLED
          • mons down
          • mon down
          • out of quorum
          • noscrub
          • nodeep-scrub
          • Degraded data redundancy
          • is down
          • osds down
          • pg .*? is .*?degraded.*?, acting
          • pg .*? is stuck
          • pg degraded
          • PG_BACKFILL_FULL
          • Low space hindering backfill .*? backfill_toofull
          • OSD_HOST_DOWN
          • OSD_ROOT_DOWN
        • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
        • cluster: ceph
      • thrashosds:
        • aggressive_pg_num_changes: False
        • chance_pgnum_grow: 1
        • chance_pgnum_shrink: 1
        • chance_pgpnum_fix: 1
        • min_in: 2
        • thrash_hosts: True
        • timeout: 1200
        • sighup_delay: 0.1
        • optrack_toggle_delay: 2.0
        • dump_ops_enable: True
        • noscrub_toggle_delay: 2.0
        • random_eio: 0.0
        • bdev_inject_crash: 2
        • bdev_inject_crash_probability: 0.5
      • rados:
        • clients:
          • client.0
        • ec_pool: True
        • erasure_code_profile:
          • crush-failure-domain: osd
          • k: 6
          • m: 3
          • name: isa63profile
          • plugin: isa
          • technique: reed_sol_van
        • min_size: 6
        • objects: 50
        • op_weights:
          • append: 100
          • copy_from: 50
          • delete: 50
          • read: 100
          • rmattr: 25
          • rollback: 50
          • setattr: 25
          • snap_create: 50
          • snap_remove: 50
          • write: 0
        • ops: 4000
        • write_append_excl: False
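
Note: the rados workload drives 4000 ops from client.0 against an ISA k=6/m=3 erasure-coded pool (nine shards per object; min_size 6 lets a PG stay active with as few as six of them). The op_weights are relative frequencies, not percentages, and plain writes are disabled (weight 0) in favor of appends. A minimal sketch of how such weights become an op stream, illustrative only (the real driver is ceph_test_rados, whose selection logic this does not reproduce):

    import random
    from collections import Counter

    # Relative op weights from the job above; a weight of 0 disables an op.
    OP_WEIGHTS = {
        "append": 100, "copy_from": 50, "delete": 50, "read": 100,
        "rmattr": 25, "rollback": 50, "setattr": 25,
        "snap_create": 50, "snap_remove": 50, "write": 0,
    }

    def op_stream(n_ops=4000, seed=6407):  # job's seed, reused here for illustration
        """Yield n_ops op names drawn with the given relative weights."""
        rng = random.Random(seed)
        ops, weights = zip(*OP_WEIGHTS.items())
        for _ in range(n_ops):
            yield rng.choices(ops, weights=weights)[0]

    print(Counter(op_stream()))  # "write" never appears: its weight is 0
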
  • timestamp: 2026-03-31 11:18:10
  • teuthology_branch: uv2
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-31 11:18:36
  • started: 2026-03-31 13:19:19
  • updated: 2026-03-31 14:11:29
  • status_class: success
  • runtime: 0:52:10
  • wait_time: 0:27:42
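
Note: the timing fields are internally consistent: runtime is updated minus started, and splits into wait_time plus duration. A quick arithmetic check:

    from datetime import datetime, timedelta

    def hms(field):
        """Parse an 'H:MM:SS' report field into a timedelta."""
        h, m, s = (int(part) for part in field.split(":"))
        return timedelta(hours=h, minutes=m, seconds=s)

    duration, wait_time, runtime = hms("0:24:28"), hms("0:27:42"), hms("0:52:10")
    assert wait_time + duration == runtime  # 0:27:42 + 0:24:28 == 0:52:10

    fmt = "%Y-%m-%d %H:%M:%S"
    started = datetime.strptime("2026-03-31 13:19:19", fmt)
    updated = datetime.strptime("2026-03-31 14:11:29", fmt)
    assert updated - started == runtime     # 14:11:29 - 13:19:19 == 0:52:10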