Nodes: vm01 vm08

Description: orch/cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/998/teuthology.log

Failure Reason:

hit max job timeout
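
For reference, the linked teuthology log can be checked for the timeout marker. A minimal, illustrative command (the exact message wording in the log may differ):

    # Illustrative only: scan the tail of the teuthology log for the timeout message.
    curl -s https://schulp.build.clyso.com/a/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/998/teuthology.log \
      | tail -n 200 | grep -i 'max job timeout'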

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/998/teuthology.log
  • archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/998
  • description: orch/cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final}
  • duration: (Empty)
  • email: (Empty)
  • failure_reason: hit max job timeout
  • flavor: default
  • job_id: 998
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: squid
    • ansible.cephlab:
      • branch: main
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • timezone: UTC
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd mclock iops capacity threshold hdd: 49000
          • osd shutdown pgref assert: True
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • CEPHADM_REFRESH_FAILED
      • log-only-match:
        • CEPHADM_
      • sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon: (Empty)
    • install:
      • ceph:
        • flavor: default
        • sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
      • extra_system_packages:
        • deb:
          • python3-xmltodict
          • python3-jmespath
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-xmltodict
          • python3-jmespath
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
        • scontext=system_u:system_r:getty_t:s0
    • workunit:
      • branch: tt-squid
      • sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
  • owner: kyr
  • pid: 1626660
  • roles:
    • ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
    • ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
  • sentry_event: (Empty)
  • status: dead
  • success: (Empty)
  • branch: squid
  • seed: 8043
  • sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  • subset: 1/64
  • suite: orch
  • suite_branch: tt-squid
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
  • targets:
    • vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIB3b62HvfiHCT3NIL2CQH8mjNM0kZvGR0BnQYF6SL7eodqPHGYFs74mB/ixlUQPurYhTqbnDOJFhyS1a+mSsHs=
    • vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFKmKa0uHJuHAaub56X2bIQAXIIcDdeFpOYhsfcbZY+R2a5f56HDw6DpzC9G952xXXwtd1b4KKd8PY6nruUo/2Q=
  • tasks:
      • cephadm:
        • cephadm_branch: v17.2.0
        • cephadm_git_url: https://github.com/ceph/ceph
        • image: quay.io/ceph/ceph:v17.2.0
        • roleless: True
      • cephadm.shell:
        • host.a:
          • ceph orch status
          • ceph orch ps
          • ceph orch ls
          • ceph orch host ls
          • ceph orch device ls
      • vip.exec:
        • all-hosts:
          • systemctl stop nfs-server
      • cephadm.shell:
        • host.a:
          • ceph fs volume create foofs
      • cephadm.wait_for_service:
        • service: mds.foofs
      • cephadm.shell:
        • host.a:
          • ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2
          • ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
          • while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done
      • vip.exec:
        • host.a:
          • mkdir /mnt/foo
          • while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done
          • echo test > /mnt/foo/testfile
          • sync
      • parallel: (the two referenced task groups below are sketched after this task list)
        • upgrade-tasks
        • workload-tasks
      • vip.exec:
        • host.a:
          • umount /mnt/foo
      • cephadm.shell:
        • host.a:
          • ceph nfs cluster ls | grep foo
          • ceph nfs export ls foo --detailed
          • rados -p .nfs --all ls -
      • cephadm.shell:
        • host.a:
          • set -ex
            [[ `ceph config get mgr mgr/cephadm/migration_current` -gt 2 ]]
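
The parallel task above runs two named task groups, upgrade-tasks and workload-tasks, whose definitions are not expanded in this report. As rough orientation only, here is a sketch of the shape such groups typically take in cephadm upgrade suites; the image, commands, and workunit named below are assumptions, not the values used by this job:

    # Hedged sketch of referenced task groups (not the job's actual definitions).
    upgrade-tasks:
      sequential:
      - cephadm.shell:
          env: [sha1]
          host.a:
          # start the upgrade to the target build, then poll until it completes
          - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
          - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
    workload-tasks:
      sequential:
      - workunit:
          clients:
            client.0:
            # keep I/O running against the mounted filesystem during the upgrade
            - suites/fsstress.sh

The actual group definitions for this job come from the 3-upgrade-with-workload fragment named in the description above.
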
  • timestamp: 2026-03-10 01:00:38
  • teuthology_branch: clyso-debian-13
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-10 01:01:17
  • started: 2026-03-10 10:10:31
  • updated: 2026-03-10 12:19:41
  • status_class: danger
  • runtime: 2:09:10
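
The runtime field is the wall-clock delta between started and updated. A quick, illustrative check (GNU date):

    # 2026-03-10 10:10:31 -> 2026-03-10 12:19:41 is 7750 seconds, i.e. 2:09:10
    date -u -d @$(( $(date -u -d '2026-03-10 12:19:41' +%s) - $(date -u -d '2026-03-10 10:10:31' +%s) )) +%H:%M:%S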