Nodes: vm01 vm04 vm07

Description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain}

Log: https://schulp.build.clyso.com/a/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/teuthology.log

  • log_href: https://schulp.build.clyso.com/a/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/teuthology.log
  • archive_path: /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10
  • description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain}
  • duration: 0:11:44
  • email: (Empty)
  • failure_reason: (Empty)
  • flavor: default
  • job_id: 10
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: cobaltcore-storage-v19.2.3-fasttrack-5
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • timezone: UTC
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 3
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: True
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd mclock iops capacity threshold hdd: 49000
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • MON_DOWN
        • mons down
        • mon down
        • out of quorum
        • CEPHADM_STRAY_HOST
        • CEPHADM_STRAY_DAEMON
        • CEPHADM_FAILED_DAEMON
      • log-only-match:
        • CEPHADM_
      • sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
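      Note: `mon election default strategy: 3` above selects the connectivity election strategy (1 = classic, 2 = disallow, 3 = connectivity), matching the `mon_election/connectivity` facet in the job description. A minimal sketch, assuming a running cluster, of inspecting the same option at runtime (not part of this job):

        ceph config get mon mon_election_default_strategy   # expect: 3
        ceph mon dump | grep election_strategy              # strategy recorded in the monmap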
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
      • containers:
        • image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
    • install:
      • ceph:
        • flavor: default
        • sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
      • extra_system_packages:
        • deb:
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-xmltodict
          • s3cmd
      • repos:
        • name: ceph-source
          priority: 1
          url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/SRPMS
        • name: ceph-noarch
          priority: 1
          url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/noarch
        • name: ceph
          priority: 1
          url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64
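      Each repos entry above (name, priority, url) becomes a dnf repository on the targets. A hypothetical sketch of the resulting file for the `ceph` entry; the filename and the `enabled`/`gpgcheck` values are assumptions, not taken from this job:

        sudo tee /etc/yum.repos.d/ceph.repo >/dev/null <<'EOF'
        [ceph]
        name=ceph
        baseurl=https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64
        priority=1
        enabled=1
        gpgcheck=0
        EOF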
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
        • scontext=system_u:system_r:getty_t:s0
    • workunit:
      • branch: tt-fasttrack-5-workunits
      • sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b
  • owner: irq0
  • pid: 11469
  • roles:
    • ['host.a', 'mon.a', 'mgr.a', 'osd.0', 'osd.1']
    • ['host.b', 'mon.b', 'mgr.b', 'osd.2', 'osd.3']
    • ['host.c', 'mon.c', 'osd.4', 'osd.5']
  • sentry_event: (Empty)
  • status: pass
  • success: True
  • branch: cobaltcore-storage-v19.2.3-fasttrack-5
  • seed: 8363
  • sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
  • subset: 1/64
  • suite: orch:cephadm:workunits
  • suite_branch: tt-fasttrack-5-workunits
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b
  • targets:
    • vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBI0gV1mXoWqxwHZ7kVKoQs0nE+2LQ+M8MOa5O4NX/KEQbdbgqr1NqZ0vZQQat+MWMHbMtEjKmfmqyxhiNA6aUQ=
    • vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBArEyZT3Y3HawOhf365kL7KuhAUYmgz9Z0n/9FJrE8zyi+jojfodY+iS9WPIthfkG+NBSHYYskmKU+o95rbqNiI=
    • vm07.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNwUgCSVXpLtdEzkll7Gr/Ag7HDAxMa3Kj5lU6DJVMiHg2sb1aIR7qkosoDQM/13It0RhQ1qte8n+Blvxmoh9ac=
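    The targets entries are the hosts' SSH public keys, which teuthology pre-seeds so connections are host-key-verified. An equivalent manual form (illustrative only; `<base64-key>` stands for the blob above):

      printf '%s %s %s\n' vm01.local ecdsa-sha2-nistp256 '<base64-key>' >> ~/.ssh/known_hosts
      ssh-keygen -F vm01.local   # confirm the host is now known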
  • tasks:
      • internal.save_config
      • internal.check_lock
      • internal.add_remotes
      • console_log
      • internal.connect
      • internal.push_inventory
      • internal.serialize_remote_roles
      • internal.check_conflict
      • internal.check_ceph_data
      • internal.vm_setup
      • internal.base
      • internal.archive_upload
      • internal.archive
      • internal.coredump
      • internal.sudo
      • internal.syslog
      • internal.timer
      • pcp
      • selinux
      • ansible.cephlab
      • clock
      • pexec:
        • all:
          • sudo dnf remove nvme-cli -y
          • sudo dnf install nvmetcli nvme-cli -y
      • cephadm
      • cephadm.shell:
        • host.a:
          • set -ex
            HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
            for host in $HOSTNAMES; do
              # find the hostname for "host.c" which will have no mgr
              HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
              if [ "$HAS_MGRS" == "false" ]; then
                HOST_C="${host}"
              fi
            done
            # One last thing to worry about before draining the host
            # is that the teuthology test tends to put the explicit
            # hostnames in the placement for the mon service.
            # We want to make sure we can drain without providing
            # --force and there is a check for the host being removed
            # being listed explicitly in the placements. Therefore,
            # we should remove it from the mon placement.
            ceph orch ls mon --export > mon.yaml
            sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
            ceph orch apply -i mon_adjusted.yaml
            # now drain that host
            ceph orch host drain $HOST_C --zap-osd-devices
            # wait for drain to complete
            HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
            while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
              sleep 15
              HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
            done
            # we want to check the ability to remove the host from
            # the CRUSH map, so we should first verify the host is in
            # the CRUSH map.
            ceph osd getcrushmap -o compiled-crushmap
            crushtool -d compiled-crushmap -o crushmap.txt
            CRUSH_MAP=$(cat crushmap.txt)
            if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
              printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
              exit 1
            fi
            # If the drain was successful, we should be able to remove the
            # host without force with no issues. If there are still daemons
            # we will get a response telling us to drain the host and a
            # non-zero return code
            ceph orch host rm $HOST_C --rm-crush-entry
            # verify we've successfully removed the host from the CRUSH map
            sleep 30
            ceph osd getcrushmap -o compiled-crushmap
            crushtool -d compiled-crushmap -o crushmap.txt
            CRUSH_MAP=$(cat crushmap.txt)
            if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
              printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
              exit 1
            fi
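      The script identifies host.c indirectly: per the roles above, host.c is the only host that carries no mgr, so the task probes each host's daemon list with jq. A standalone sketch of that filter, run against a hypothetical sample shaped like `ceph orch ps --format json` output (the sample data is illustrative, not from this run):

        echo '[{"daemon_type":"mon"},{"daemon_type":"osd"},{"daemon_type":"osd"}]' \
          | jq 'any(.daemon_type == "mgr")'   # prints "false" -> this host is host.c

      Note the drain itself is asynchronous, hence the design of the script: it polls `ceph orch ps --hostname $HOST_C` every 15 seconds until cephadm reports no daemons, and sleeps 30 seconds after `ceph orch host rm` before re-reading the CRUSH map.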
  • timestamp: 2026-03-07 10:02:54
  • teuthology_branch: clyso-debian-13
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: irq0
  • queue: (Empty)
  • posted: 2026-03-07 10:02:56
  • started: 2026-03-07 10:04:39
  • updated: 2026-03-07 10:18:20
  • status_class: success
  • runtime: 0:13:41
  • wait_time: 0:01:57
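
  The timing fields are internally consistent: runtime equals updated minus started, and duration equals runtime minus wait_time. A quick check with the values above (GNU date):

    start=$(date -u -d '2026-03-07 10:04:39' +%s)
    end=$(date -u -d '2026-03-07 10:18:20' +%s)
    echo $(( end - start ))   # 821 s = 0:13:41 = runtime
    echo $(( 821 - 117 ))     # runtime minus wait_time (0:01:57) = 704 s = 0:11:44 = duration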