- log_href: https://schulp.build.clyso.com/a/irq0-2026-03-06_13:20:18-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/270/teuthology.log
- archive_path: /archive/irq0-2026-03-06_13:20:18-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps/270
- description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain}
- duration: 0:09:27
- email: (Empty)
- failure_reason: (Empty)
- flavor: default
- job_id: 270
- kernel: (Empty)
- last_in_suite: False
- machine_type: vps
- name: irq0-2026-03-06_13:20:18-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-3-none-default-vps
- nuke_on_error: (Empty)
- os_type: centos
- os_version: 9.stream
- overrides:
- admin_socket:
- branch: cobaltcore-storage-v19.2.3-fasttrack-3
- ansible.cephlab:
- branch: main
- repo: https://github.com/kshtsk/ceph-cm-ansible.git
- skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
- vars:
- ceph:
- conf:
- global:
- mon election default strategy: 3
- mgr:
- debug mgr: 20
- debug ms: 1
- mgr/cephadm/use_agent: True
- mon:
- debug mon: 20
- debug ms: 1
- debug paxos: 20
- osd:
- debug ms: 1
- debug osd: 20
- osd mclock iops capacity threshold hdd: 49000
- flavor: default
- log-ignorelist:
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- MON_DOWN
- mons down
- mon down
- out of quorum
- CEPHADM_STRAY_HOST
- CEPHADM_STRAY_DAEMON
- CEPHADM_FAILED_DAEMON
- log-only-match:
- sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
- ceph-deploy:
- conf:
- client:
- log file: /var/log/ceph/ceph-$name.$pid.log
- mon:
- cephadm:
- cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
- containers:
- image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-3
- install:
- ceph:
- flavor: default
- sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
- extra_system_packages:
- deb:
- rpm:
- bzip2
- perl-Test-Harness
- python3-xmltodict
- s3cmd
- repos:
- name: ceph-source
- priority: 1
- url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/SRPMS
- name: ceph-noarch
- priority: 1
- url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/noarch
- name: ceph
- priority: 1
- url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-47-gc24117fd552/el9.clyso/x86_64
- selinux:
- allowlist:
- scontext=system_u:system_r:logrotate_t:s0
- scontext=system_u:system_r:getty_t:s0
- workunit:
- branch: tt-19.2.3-fasttrack-3-no-nvme-loop
- sha1: 5726a36c3452e5b72190cfceba828abc62c819b7
- owner: irq0
- pid: 476253
- roles:
- ['host.a', 'mon.a', 'mgr.a', 'osd.0', 'osd.1']
- ['host.b', 'mon.b', 'mgr.b', 'osd.2', 'osd.3']
- ['host.c', 'mon.c', 'osd.4', 'osd.5']
- sentry_event: (Empty)
- status: pass
- success: True
- branch: cobaltcore-storage-v19.2.3-fasttrack-3
- seed: 6609
- sha1: c24117fd5525679b799527bc1bd1f1dd0a2db5e2
- subset: 1/64
- suite: orch:cephadm:workunits
- suite_branch: tt-19.2.3-fasttrack-3-no-nvme-loop
- suite_path: /home/teuthos/src/github.com_kshtsk_ceph_5726a36c3452e5b72190cfceba828abc62c819b7/qa
- suite_relpath: qa
- suite_repo: https://github.com/kshtsk/ceph.git
- suite_sha1: 5726a36c3452e5b72190cfceba828abc62c819b7
- targets:
- vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI5uTFn5SO961Jhp+aIHom0l/zednVTJnl2zNZ0vGCUVKWwcqpouTNPRHRwD56vBiXIZAiV6sjweSlTamRhwZJk=
- vm07.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMcRqfd/Uo+xIGSgXjm9xYdvVdnyzTf7sdIOWHd+6G2nqSmVd7NsIO4KjDtUkZfPxNTAgCKVUMjdr1ni45JG044=
- vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDw68bBRfB0xa0EW7xhT/G4fgicbMzjSb25c9Al/GLfG8STOtB5MzT9oat1GoeBbr14K+zI/BlG9xsCSto9T3k4=
- tasks:
- internal.serialize_remote_roles
- pexec:
- all:
- sudo dnf remove nvme-cli -y
- sudo dnf install nvmetcli nvme-cli -y
- cephadm.shell:
- host.a:
- set -ex
HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
for host in $HOSTNAMES; do
# find the hostname for "host.c" which will have no mgr
HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
if [ "$HAS_MGRS" == "false" ]; then
HOST_C="${host}"
fi
done
# One last thing to worry about before draining the host
# is that the teuthology test tends to put explicit
# hostnames in the placement for the mon service.
# We want to make sure we can drain without providing
# --force, and there is a check that refuses the drain if
# the host being removed is listed explicitly in a
# placement. Therefore, we should remove it from the mon
# placement.
ceph orch ls mon --export > mon.yaml
sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
ceph orch apply -i mon_adjusted.yaml
# now drain that host
ceph orch host drain $HOST_C --zap-osd-devices
# wait for drain to complete
HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
sleep 15
HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
done
# we want to check the ability to remove the host from
# the CRUSH map, so we should first verify the host is in
# the CRUSH map.
ceph osd getcrushmap -o compiled-crushmap
crushtool -d compiled-crushmap -o crushmap.txt
CRUSH_MAP=$(cat crushmap.txt)
if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
exit 1
fi
# If the drain was successful, we should be able to remove the
# host without --force and with no issues. If there are still
# daemons, we will get a response telling us to drain the host
# first and a non-zero return code.
ceph orch host rm $HOST_C --rm-crush-entry
# verify we've successfully removed the host from the CRUSH map
sleep 30
ceph osd getcrushmap -o compiled-crushmap
crushtool -d compiled-crushmap -o crushmap.txt
CRUSH_MAP=$(cat crushmap.txt)
if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
exit 1
fi
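The drain-wait loop above polls "ceph orch ps" with no upper bound, so a stuck drain would block the job until the teuthology timeout. As a minimal sketch only, the same wait could carry a timeout guard; the 600-second bound and the error message are assumptions, not part of the recorded test:
# Hypothetical variant of the drain-wait loop with a timeout guard.
# The 600-second bound is an assumption, not part of this job.
DEADLINE=$((SECONDS + 600))
while [ "$(ceph orch ps --hostname $HOST_C)" != "No daemons reported" ]; do
  if [ "$SECONDS" -ge "$DEADLINE" ]; then
    echo "timed out waiting for daemons to drain from $HOST_C" >&2
    exit 1
  fi
  sleep 15
done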
- timestamp: 2026-03-06 13:20:18
- teuthology_branch: clyso-debian-13
- verbose: False
- pcp_grafana_url: (Empty)
- priority: 1000
- user: irq0
- queue: (Empty)
- posted: 2026-03-06 12:20:20
- started: 2026-03-06 12:26:42
- updated: 2026-03-06 12:39:50
- status_class: success
- runtime: 0:13:08
- wait_time: 0:03:41