Nodes: vm01 vm05 vm06

Description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/classic task/test_mgmt_gateway}

Log: https://schulp.build.clyso.com/a/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4343/teuthology.log

  • log_href: https://schulp.build.clyso.com/a/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4343/teuthology.log
  • archive_path: /archive/kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps/4343
  • description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/classic task/test_mgmt_gateway}
  • duration: 0:13:43
  • email: (Empty)
  • failure_reason: (Empty)
  • flavor: default
  • job_id: 4343
  • kernel: (Empty)
  • last_in_suite: False
  • machine_type: vps
  • name: kyr-2026-03-31_11:18:10-rados-tentacle-none-default-vps
  • nuke_on_error: (Empty)
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: tentacle
    • ansible.cephlab:
      • branch: main
      • repo: https://github.com/kshtsk/ceph-cm-ansible.git
      • skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
      • vars:
        • logical_volumes:
          • lv_1:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_2:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_3:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
          • lv_4:
            • scratch_dev: True
            • size: 25%VG
            • vg: vg_nvme
        • timezone: UTC
        • volume_groups:
          • vg_nvme:
            • pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
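
    Note: the logical_volumes/volume_groups vars above describe four scratch LVs, each sized at 25% of a vg_nvme volume group built from the four listed PVs. A minimal sketch of the equivalent manual LVM commands (ceph-cm-ansible performs this from the vars; device names taken from pvs above):

        # Sketch only: pool the four PVs into vg_nvme, then carve four equal LVs
        pvcreate /dev/vdb /dev/vdc /dev/vdd /dev/vde
        vgcreate vg_nvme /dev/vdb /dev/vdc /dev/vdd /dev/vde
        for i in 1 2 3 4; do
            lvcreate -l 25%VG -n lv_$i vg_nvme   # each LV gets 25% of the VG's extents
        done
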
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 1
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: False
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd mclock iops capacity threshold hdd: 49000
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • CEPHADM_FAILED_DAEMON
      • log-only-match:
        • CEPHADM_
      • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
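
    Note: the conf overrides above are baked into the test cluster's configuration files by teuthology. A sketch of applying the same intent to a running cluster through the config database (equivalent settings, not how teuthology injects them):

        ceph config set global mon_election_default_strategy 1     # classic election
        ceph config set mgr mgr/cephadm/use_agent false            # agent/off facet
        ceph config set mgr debug_mgr 20                           # and likewise for the other debug_* levels
        ceph config set osd osd_mclock_iops_capacity_threshold_hdd 49000
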
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • cephadm:
      • cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
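
    Note: cephadm_binary_url points teuthology at a standalone cephadm build instead of the packaged one. Fetching and checking it by hand would look like this (sketch):

        curl --silent --remote-name https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
        chmod +x cephadm
        ./cephadm version   # sanity-check the downloaded binary
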
    • install:
      • ceph:
        • flavor: default
        • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
      • extra_system_packages:
        • deb:
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
        • rpm:
          • bzip2
          • perl-Test-Harness
          • python3-jmespath
          • python3-xmltodict
          • s3cmd
    • workunit:
      • branch: tt-tentacle
      • sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • owner: kyr
  • pid: 987869
  • roles:
    • ['host.a', 'mon.a', 'mgr.a', 'osd.0']
    • ['host.b', 'mon.b', 'mgr.b', 'osd.1']
    • ['host.c', 'mon.c', 'osd.2']
  • sentry_event: (Empty)
  • status: pass
  • success: True
  • branch: tentacle
  • seed: 6407
  • sha1: 5bb3278730741031382ca9c3dc9d221a942e06a2
  • subset: 1/100000
  • suite: rados
  • suite_branch: tt-tentacle
  • suite_path: /home/teuthos/src/github.com_kshtsk_ceph_0392f78529848ec72469e8e431875cb98d3a5fb4/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/kshtsk/ceph.git
  • suite_sha1: 0392f78529848ec72469e8e431875cb98d3a5fb4
  • targets:
    • vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNZhSXiJ6HmktE7Jpcd4KqGqn3gMwfcXojJpcPQzMZxvIBkP+uov4tpyh9F3kDEZQtevyuf7OLrIqYzy36Gc1r8=
    • vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEA59JSle68O4OuVsMJIh796zogDHziY/Hcp2nEuglTWicKo3nUYo5k74UntPQuRnMPoLe/I5imhgXXZJW5UMoI=
    • vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCzZOLaOq6chCjCEUVxsxgf8Iq0nk3OaSJ4dhlwkjtsBOGEEYtE/hpdgP7ubOOWaIlRFqyX/WwK9MTlgnE0RATQ=
  • tasks:
      • internal.check_packages
      • internal.buildpackages_prep
      • internal.save_config
      • internal.check_lock
      • internal.add_remotes
      • console_log
      • internal.connect
      • internal.push_inventory
      • internal.serialize_remote_roles
      • internal.check_conflict
      • internal.check_ceph_data
      • internal.vm_setup
      • internal.base
      • internal.archive_upload
      • internal.archive
      • internal.coredump
      • internal.sudo
      • internal.syslog
      • internal.timer
      • pcp
      • selinux
      • ansible.cephlab
      • clock
      • install
      • cephadm
      • cephadm.shell:
        • host.c:
          • set -ex
            # Deploy monitoring stack
            ceph orch apply node-exporter
            ceph orch apply grafana
            ceph orch apply alertmanager
            ceph orch apply prometheus
            sleep 240
            # generate SSL certificate
            openssl req -x509 -newkey rsa:4096 -keyout /tmp/key.pem -out /tmp/cert.pem -sha256 -days 30 -nodes -subj "/CN=*"
            # Generate a mgmt.spec template
            cat << EOT > /tmp/mgmt.spec
            service_type: mgmt-gateway
            service_id: foo
            placement:
              hosts:
                - ${HOSTNAME}
            spec:
              ssl_protocols:
                - TLSv1.2
                - TLSv1.3
              ssl_ciphers:
                - AES128-SHA
                - AES256-SHA
              enable_health_check_endpoint: True
            EOT
            # Add generated certificates to spec file
            echo "  ssl_cert: |" >> /tmp/mgmt.spec
            while read LINE; do echo $LINE | sed -e "s/^/    /"; done < /tmp/cert.pem >> /tmp/mgmt.spec
            echo "  ssl_key: |" >> /tmp/mgmt.spec
            while read LINE; do echo $LINE | sed -e "s/^/    /"; done < /tmp/key.pem >> /tmp/mgmt.spec
            # Apply spec
            ceph orch apply -i /tmp/mgmt.spec
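
    Note: the per-line sed indent above matters because ssl_cert and ssl_key are appended as YAML block scalars nested under spec. A hypothetical pre-apply sanity check (not part of the test; assumes PyYAML is installed on the host):

        cat /tmp/mgmt.spec                                # eyeball the block-scalar indentation
        python3 -c 'import yaml; yaml.safe_load(open("/tmp/mgmt.spec"))' \
            && echo "spec parses as YAML"
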
      • cephadm.wait_for_service:
        • service: mgmt-gateway
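
    Note: cephadm.wait_for_service blocks until the orchestrator reports the service fully up. A rough hand-rolled equivalent (sketch, assuming jq is available):

        # Poll until the mgmt-gateway service reports running == size
        until ceph orch ls mgmt-gateway --format json \
                | jq -e '.[0].status | .running == .size' > /dev/null; do
            sleep 10
        done
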
      • cephadm.shell:
        • host.a:
          • set -ex
            # Function to wait for a service to be healthy and log response on error
            wait_for_service() {
                local name="$1"
                local url="$2"
                local jq_filter="$3"
                echo "Waiting for service $name to be healthy at $url..."
                for i in {1..30}; do
                    local response
                    response=$(curl -k -s -u admin:admin "$url")
                    if echo "$response" | jq -e "$jq_filter" > /dev/null; then
                        echo "Service $name is healthy."
                        return 0
                    fi
                    echo "Attempt $i: service $name not ready yet"
                    sleep 10
                done
                echo "Timeout waiting for $name at $url"
                echo "Last HTTP response:"
                echo "$response"
                echo "jq output:"
                echo "$response" | jq "$jq_filter" || echo "(jq parse error or no match)"
                return 1
            }
            # retrieve mgmt hostname and ip
            MGMT_GTW_HOST=$(ceph orch ps --daemon-type mgmt-gateway -f json | jq -e '.[]' | jq -r '.hostname')
            MGMT_GTW_IP=$(ceph orch host ls -f json | jq -r --arg MGMT_GTW_HOST "$MGMT_GTW_HOST" '.[] | select(.hostname==$MGMT_GTW_HOST) | .addr')
            # check mgmt-gateway health
            curl -k -s https://${MGMT_GTW_IP}/health
            curl -k -s https://${MGMT_GTW_IP}:29443/health
            # wait for monitoring services
            wait_for_service "Grafana" "https://${MGMT_GTW_IP}/grafana/api/health" '.database == "ok"' || exit 1
            wait_for_service "Prometheus" "https://${MGMT_GTW_IP}/prometheus/api/v1/status/config" '.status == "success"' || exit 1
            wait_for_service "Alertmanager" "https://${MGMT_GTW_IP}/alertmanager/api/v2/status" '.cluster.status == "ready"' || exit 1
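
    Note: the second /health probe targets port 29443, the dedicated listener added by enable_health_check_endpoint: True in the spec from the host.c step. A minimal manual probe that also surfaces the HTTP status code, which the script above discards (sketch):

        curl -k -s -o /dev/null -w '%{http_code}\n' https://${MGMT_GTW_IP}/health
        curl -k -s -o /dev/null -w '%{http_code}\n' https://${MGMT_GTW_IP}:29443/health
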
  • timestamp: 2026-03-31 11:18:10
  • teuthology_branch: uv2
  • verbose: False
  • pcp_grafana_url: (Empty)
  • priority: 1000
  • user: kyr
  • queue: (Empty)
  • posted: 2026-03-31 11:18:51
  • started: 2026-03-31 19:37:36
  • updated: 2026-03-31 20:31:46
  • status_class: success
  • runtime: 0:54:10
  • wait_time: 0:40:27