Nodes: vm06 vm08

Description: orch:cephadm:osds/{0-distro/centos_9.stream_runc 1-start 2-ops/rmdir-reactivate}

Log: https://schulp.build.clyso.com/a/irq0-2026-03-06_09:41:01-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/112/teuthology.log

  • log_href: https://schulp.build.clyso.com/a/irq0-2026-03-06_09:41:01-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/112/teuthology.log
  • archive_path: /archive/irq0-2026-03-06_09:41:01-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/112
  • description: orch:cephadm:osds/{0-distro/centos_9.stream_runc 1-start 2-ops/rmdir-reactivate}
  • duration: 0:07:03
  • email: (Empty)
  • failure_reason: (Empty)
  • flavor: default
  • job_id: 112
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: irq0-2026-03-06_09:41:01-orch:cephadm:osds-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: cobaltcore-storage-v19.2.3-fasttrack-3
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • timezone: Europe/Berlin
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd mclock iops capacity threshold hdd: 49000
          • osd shutdown pgref assert: True
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • OSD_DOWN
        • CEPHADM_FAILED_DAEMON
        • but is still running
        • PG_DEGRADED
      • log-only-match:
        • CEPHADM_
      • sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon: (Empty)
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
      • containers:
        • image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3
    • install:
      • ceph:
        • flavor: default
        • sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
      • extra_system_packages:
        • deb:
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-xmltodict
          • s3cmd
      • repos:
        • name: ceph-source
          • priority: 1
          • url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/SRPMS
        • name: ceph-noarch
          • priority: 1
          • url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/noarch
        • name: ceph
          • priority: 1
          • url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/x86_64
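    Note: all three repos above pin priority 1, so the custom Clyso builds of
    19.2.3 take precedence over stock CentOS packages. Teuthology's install
    task writes these repo definitions itself; a rough manual equivalent for
    one entry on el9 (a sketch, assuming dnf-plugins-core is available) would
    be:

      # Sketch: add one of the custom repos by hand; dnf derives the repo id
      # from the URL, and priority=1 would still have to be set in the
      # generated .repo file.
      sudo dnf config-manager --add-repo \
        https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/x86_64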
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: tt-19.2.3-fasttrack-3-no-nvme-loop
      • sha1: 5726a36c3452e5b72190cfceba828abc62c819b7
  • owner: irq0
  • pid: 364712
  • roles:
    • ['host.a', 'client.0']
    • ['host.b', 'client.1']
  • sentry_event: (Empty)
  • status: pass
  • success: True
  • branch: cobaltcore-storage-v19.2.3-fasttrack-3
  • seed: 1661
  • sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
  • subset: (Empty)
  • suite: orch:cephadm:osds
  • suite_branch: tt-19.2.3-fasttrack-3-no-nvme-loop
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_5726a36c3452e5b72190cfceba828abc62c819b7/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 5726a36c3452e5b72190cfceba828abc62c819b7
  • targets:
    • vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPI9xBt7jLbWGkk768snr7Blx4dQGgu2KA7btrpvWrMJ/joTW1/u91fJHB22qgWQJRKGFBwcGmZesv4n868RqVE=
    • vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJwVpfzEIMxMrAcf7FxAWzKLNCJCptT4vtl5pBtaPg2QH8NXcY9pGSIYj3/xeRR79aODNGKFn1l+bxREQkuljSg=
  • tasks:
      • internal.save_config
      • internal.check_lock
      • internal.add_remotes
      • console_log
      • internal.connect
      • internal.push_inventory
      • internal.serialize_remote_roles
      • internal.check_conflict
      • internal.check_ceph_data
      • internal.vm_setup
      • internal.base
      • internal.archive_upload
      • internal.archive
      • internal.coredump
      • internal.sudo
      • internal.syslog
      • internal.timer
      • pcp
      • selinux
      • ansible.cephlab
      • clock
      • pexec:
        • all:
          • sudo dnf remove nvme-cli -y
          • sudo dnf install runc nvmetcli nvme-cli -y
          • sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
          • sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
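      Note: the pexec step above switches podman's OCI runtime from the crun
      default to runc: the first sed activates a commented-out runtime line as
      runc, and the second comments out any still-active crun line. A quick
      check that the switch took effect (a sketch, not part of the recorded
      job):

        # Sketch: confirm podman now reports runc as its OCI runtime.
        podman info --format '{{ .Host.OCIRuntime.Name }}'   # expect: runc
        # Confirm containers.conf carries the edited line.
        grep -n '^runtime' /usr/share/containers/containers.conf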
      • cephadm:
        • roleless: True
        • conf:
          • mgr:
            • debug mgr: 20
            • debug ms: 1
          • mon:
            • debug mon: 20
            • debug ms: 1
            • debug paxos: 20
          • osd:
            • debug ms: 1
            • debug osd: 20
            • osd mclock iops capacity threshold hdd: 49000
            • osd shutdown pgref assert: True
        • flavor: default
        • log-ignorelist:
          • \(MDS_ALL_DOWN\)
          • \(MDS_UP_LESS_THAN_MAX\)
          • OSD_DOWN
          • CEPHADM_FAILED_DAEMON
          • but is still running
          • PG_DEGRADED
        • log-only-match:
          • CEPHADM_
        • sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
        • cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
        • containers:
          • image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3
        • cluster: ceph
        • cephadm_mode: root
      • cephadm.shell:
        • host.a:
          • ceph orch status
          • ceph orch ps
          • ceph orch ls
          • ceph orch host ls
          • ceph orch device ls
          • ceph orch ls | grep '^osd.all-available-devices '
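      Note: this first cephadm.shell block is a post-bootstrap sanity check;
      the final grep asserts that an osd.all-available-devices service exists,
      meaning cephadm auto-deploys an OSD on every eligible free device. Per
      the Ceph docs, that service is the one normally created with the command
      below (shown for reference; presumably issued by the qa tooling during
      the roleless bootstrap):

        # Creates the osd.all-available-devices service the grep checks for.
        ceph orch apply osd --all-available-devices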
      • cephadm.shell:
        • host.a:
          • set -e
            set -x
            ceph orch ps
            HOST=$(hostname -s)
            OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
            echo "host $HOST, osd $OSD"
            ceph orch daemon stop $OSD
            while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
            ceph auth export $OSD > k
            ceph orch daemon rm $OSD --force
            ceph orch ps --refresh
            while ceph orch ps | grep $OSD ; do sleep 5 ; done
            ceph auth add $OSD -i k
            ceph cephadm osd activate $HOST
            while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
      • cephadm.healthy
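      Note: the second cephadm.shell block is the rmdir-reactivate exercise
      itself: stop one OSD daemon, export its auth key, remove the daemon with
      --force, wait for it to vanish from ceph orch ps, re-import the key, and
      let ceph cephadm osd activate re-create the daemon from the data still
      on disk; cephadm.healthy then checks that the cluster reports healthy.
      The polling loops in the script spin forever on failure and rely on the
      harness timeout; a bounded variant (a sketch, not what the job ran;
      wait_for_state is a hypothetical helper) could look like:

        # Sketch: poll for an OSD daemon state with a local timeout instead
        # of relying on the harness to kill a stuck loop.
        wait_for_state() {
          local osd=$1 state=$2 tries=60    # ~5 minutes at 5s per try
          while ! ceph orch ps | grep "$osd" | grep -q "$state"; do
            tries=$((tries - 1))
            [ "$tries" -gt 0 ] || { echo "timed out waiting for $osd $state" >&2; return 1; }
            sleep 5
          done
        }
        # Usage: wait_for_state osd.1 running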
  • timestamp: 2026-03-06 09:41:01
  • teuthology_branch: clyso-debian-13
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: irq0
  • queue: (Empty)
  • posted: 2026-03-06 08:41:30
  • started: 2026-03-06 08:45:54
  • updated: 2026-03-06 08:55:10
  • status_class: success
  • runtime: 0:09:16
  • wait_time: 0:02:13