Nodes: vm00 vm05

Description: rbd/migration-external/{1-base/install 2-clusters/2-node 3-objectstore/bluestore-comp-zstd 4-supported-random-distro$/{centos_latest} 5-data-pool/ec 6-prepare/native-clone 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute conf/{disable-pool-app}}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-20_22:04:26-rbd-tentacle-none-default-vps/3511/teuthology.log

Failure Reason:

hit max job timeout
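
"hit max job timeout" means the teuthology watchdog killed the job after it
exceeded the suite's maximum allowed runtime, which is why the record below
shows status: dead rather than an explicit test failure. A quick first triage
step, assuming the archived log linked above is still reachable, is to pull
the tail of teuthology.log and see which task was still in flight:

    # Triage sketch: fetch the last lines of the archived log to find the
    # task that was running when the watchdog fired.
    curl -s https://schulp.build.clyso.com/a/kyr-2026-03-20_22:04:26-rbd-tentacle-none-default-vps/3511/teuthology.log \
        | tail -n 40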

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-20_22:04:26-rbd-tentacle-none-default-vps/3511/teuthology.log
  • archive_path: /archive/kyr-2026-03-20_22:04:26-rbd-tentacle-none-default-vps/3511
  • description: rbd/migration-external/{1-base/install 2-clusters/2-node 3-objectstore/bluestore-comp-zstd 4-supported-random-distro$/{centos_latest} 5-data-pool/ec 6-prepare/native-clone 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute conf/{disable-pool-app}}
  • duration: (Empty)
  • email: (Empty)
  • failure_reason: hit max job timeout
  • flavor: default
  • job_id: 3511
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-20_22:04:26-rbd-tentacle-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: tentacle
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • logical_volumes:
          • lv_1:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_2:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_3:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_4:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
        • timezone: UTC
        • volume_groups:
          • vg_nvme:
            • pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
    • ceph:
      • conf:
        • client:
          • rbd default data pool: datapool
        • global:
          • mon warn on pool no app: False
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • bluestore block size: 96636764160
          • bluestore compression algorithm: zstd
          • bluestore compression mode: aggressive
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • enable experimental unrecoverable data corrupting features: *
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd debug randomize hobject sort order: False
          • osd failsafe full ratio: 0.95
          • osd mclock iops capacity threshold hdd: 49000
          • osd objectstore: bluestore
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 70f8415b300f041766fa27faf7d5472699e32388
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
      • mon: (Empty)
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    • install:
      • ceph:
        • flavor: default
        • sha1: 70f8415b300f041766fa27faf7d5472699e32388
      • extra_system_packages:
        • deb:
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: tt-tentacle
      • sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • owner: kyr
  • pid: 2413792
  • roles:
    • ['cluster1.mon.a', 'cluster1.mgr.x', 'cluster1.osd.0', 'cluster1.osd.1', 'cluster1.osd.2', 'cluster1.client.0']
    • ['cluster2.mon.a', 'cluster2.mgr.x', 'cluster2.osd.0', 'cluster2.osd.1', 'cluster2.osd.2', 'cluster2.client.0']
  • sentry_event: (Empty)
  • status: dead
  • success: (Empty)
  • branch: tentacle
  • seed: 3051
  • sha1: 70f8415b300f041766fa27faf7d5472699e32388
  • subset: 1/128
  • suite: rbd
  • suite_branch: tt-tentacle
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_0392f78529848ec72469e8e431875cb98d3a5fb4/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • targets:
    • vm00.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIlJtZQAVOyr/pZ2XXp/zXlAj657Y65wyBE68uHvH/O8DebhrkM64dEOJoia0QdBTS2TpaPJzF2KjRSDrA6ZM6E=
    • vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjgPwXk1pGcsuiUTLCtFGi2TsOTHeJuZv6ECviezjtB9pwAxMFHyyxGtDEXfr1YFUHeVN+ziD0EGz7t1EbRP00=
  • tasks:
      • install
      • ceph:
        • cluster: cluster1
      • ceph:
        • cluster: cluster2
      • exec:
        • cluster1.client.0:
          • sudo ceph --cluster cluster1 osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
          • sudo ceph --cluster cluster1 osd pool create datapool 4 4 erasure teuthologyprofile
          • sudo ceph --cluster cluster1 osd pool set datapool allow_ec_overwrites true
          • rbd --cluster cluster1 pool init datapool
        • cluster2.client.0:
          • sudo ceph --cluster cluster2 osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
          • sudo ceph --cluster cluster2 osd pool create datapool 4 4 erasure teuthologyprofile
          • sudo ceph --cluster cluster2 osd pool set datapool allow_ec_overwrites true
          • rbd --cluster cluster2 pool init datapool
      • exec:
        • cluster2.client.0:
          • echo '{"type":"qcow","stream":{"type":"http","url":"http://download.ceph.com/qa/ubuntu-12.04.qcow2"}}' | rbd --cluster cluster2 migration prepare --import-only --source-spec-path - client.0.0-src
          • rbd --cluster cluster2 migration execute client.0.0-src
          • rbd --cluster cluster2 migration commit client.0.0-src
          • rbd --cluster cluster2 snap create client.0.0-src@snap
          • rbd --cluster cluster2 snap protect client.0.0-src@snap
          • rbd --cluster cluster2 clone client.0.0-src@snap client.0.0
          • rbd --cluster cluster2 snap create client.0.0@snap
          • rbd --cluster cluster2 create --size 1G client.0.1-src
          • rbd --cluster cluster2 bench --io-type write --io-pattern rand --io-size 16K --io-threads 1 --io-total 1M client.0.1-src
          • rbd --cluster cluster2 snap create client.0.1-src@snap
          • rbd --cluster cluster2 snap protect client.0.1-src@snap
          • rbd --cluster cluster2 clone client.0.1-src@snap client.0.1
          • rbd --cluster cluster2 bench --io-type write --io-pattern rand --io-size 16K --io-threads 1 --io-total 1M client.0.1
          • rbd --cluster cluster2 snap create client.0.1@snap
          • rbd --cluster cluster2 create --size 1G client.0.2-src
          • rbd --cluster cluster2 bench --io-type write --io-pattern rand --io-size 16K --io-threads 1 --io-total 1M client.0.2-src
          • rbd --cluster cluster2 snap create client.0.2-src@snap
          • rbd --cluster cluster2 snap protect client.0.2-src@snap
          • rbd --cluster cluster2 clone client.0.2-src@snap client.0.2
          • rbd --cluster cluster2 bench --io-type write --io-pattern rand --io-size 16K --io-threads 1 --io-total 2M client.0.2
          • rbd --cluster cluster2 snap create client.0.2@snap
      • exec:
        • cluster1.client.0:
          • echo '{"type":"native","cluster_name":"cluster2","client_name":"client.admin","pool_name":"rbd","image_name":"client.0.0","snap_name":"snap"}' | rbd --cluster cluster1 migration prepare --import-only --source-spec-path - client.0.0
          • echo '{"type":"native","cluster_name":"cluster2","client_name":"client.admin","pool_name":"rbd","image_name":"client.0.1","snap_name":"snap"}' | rbd --cluster cluster1 migration prepare --import-only --source-spec-path - client.0.1
          • echo '{"type":"native","cluster_name":"cluster2","client_name":"client.admin","pool_name":"rbd","image_name":"client.0.2","snap_name":"snap"}' | rbd --cluster cluster1 migration prepare --import-only --source-spec-path - client.0.2
      • parallel:
        • io_workload
        • migrate_workload
  • timestamp: 2026-03-20 22:04:26
  • teuthology_branch: clyso-debian-13
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-20 22:04:55
  • started: 2026-03-23 19:28:13
  • updated: 2026-03-23 21:31:31
  • status_class: danger
  • runtime: 2:03:18
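
Notes:

The first exec block in the tasks above builds the erasure-coded data pool
that the client override "rbd default data pool: datapool" points at. A k=2,
m=1 profile splits each object into two data chunks plus one coding chunk
(1.5x raw overhead, tolerating one OSD failure with crush-failure-domain=osd),
and allow_ec_overwrites must be true before RBD can place image data on an EC
pool. A minimal sketch for verifying that setup by hand, reusing the job's
pool and profile names (the image name "scratch" is illustrative):

    # Inspect the profile the job created and confirm overwrites are enabled.
    sudo ceph --cluster cluster1 osd erasure-code-profile get teuthologyprofile
    sudo ceph --cluster cluster1 osd pool get datapool allow_ec_overwrites

    # RBD keeps image metadata in a replicated pool (here: rbd) and places
    # data in the EC pool via --data-pool.
    rbd --cluster cluster1 create --size 1G --data-pool datapool scratch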
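
The second exec block stages images on cluster2 with RBD's three-phase
import-only migration: prepare links the target image to a source spec (here
a qcow stream fetched over HTTP), execute copies the data, and commit severs
the link to the source. Until commit runs, the migration can be inspected or
rolled back; a sketch of the bookkeeping commands, assuming the job's image
names:

    # Show the migration source, destination, and state while in flight.
    rbd --cluster cluster2 status client.0.0-src

    # Abandon a prepared or executed migration instead of committing it
    # (only valid before 'rbd migration commit').
    rbd --cluster cluster2 migration abort client.0.0-src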
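
The third exec block only runs "migration prepare" on cluster1, with native
source specs pointing at the snapshots staged on cluster2; for the native
stream to work, cluster1's client must be able to reach cluster2 under that
cluster name (conf and keyring in place, which the two-cluster setup here
provides). Judging by the 8-migrate-workloads/execute fragment in the
description, the execute/commit halves are driven by migrate_workload in the
final parallel task, interleaved with the qemu_xfstests I/O. Done by hand,
they would look roughly like:

    # Sketch: copy the data over from cluster2, then sever the
    # cross-cluster link for each prepared image.
    for img in client.0.0 client.0.1 client.0.2; do
        rbd --cluster cluster1 migration execute "$img"
        rbd --cluster cluster1 migration commit "$img"
    done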