2026-04-16T19:12:38.568 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-04-16T19:12:38.575 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-16T19:12:38.601 INFO:teuthology.run:Config:
archive_path: /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549
branch: wip-sse-s3-on-v20.2.0
description: orch:cephadm:smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '5549'
ktype: distro
last_in_suite: false
machine_type: vps
name: supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: ubuntu
os_version: '22.04'
overrides:
  admin_socket:
    branch: wip-sse-s3-on-v20.2.0
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      logical_volumes:
        lv_1:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_2:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_3:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_4:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
      timezone: UTC
      volume_groups:
        vg_nvme:
          pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7
  install:
    ceph:
      flavor: default
      sha1: c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8
    extra_system_packages:
      deb:
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-21-gc03ba9ecf58/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-21-gc03ba9ecf58/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-21-gc03ba9ecf58/el9.clyso/x86_64
  s3tests:
    sha1: e0c4ff71baef6d5126a0201df5fe54196d89b296
  workunit:
    branch: tt-wip-sse-s3-on-v20.2.0
    sha1: 909b66e106532fd1f1a49171c3f2eb7a193a6d0b
owner: supriti
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 3747
sha1: c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8
sleep_before_teardown: 0
suite: orch:cephadm:smoke-roleless
suite_branch: tt-wip-sse-s3-on-v20.2.0
suite_path: /home/teuthos/src/git.local_ceph_909b66e106532fd1f1a49171c3f2eb7a193a6d0b/qa
suite_relpath: qa
suite_repo: http://git.local/ceph.git
suite_sha1: 909b66e106532fd1f1a49171c3f2eb7a193a6d0b
targets:
  vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJUddllhtNGRKrGP8Em9ts+l29jpGNlWaitWFI+W9mf41zKoJX6sKzN479i1hqvSqhqCki5hHVWhcHDy2i+z8vU=
  vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEGtvRViXP64vTFzpa3KKMX3s3JZ66KkgvKwyKQtfuuMD5DEWBWLO9GgJF+NNNmMpMrR35eA3Zp1czZFpw5WNVs=
tasks:
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- cephadm.apply:
    specs:
    - placement:
        count: 4
        host_pattern: '*'
      service_id: foo
      service_type: rgw
      spec:
        rgw_frontend_port: 8000
    - placement:
        count: 2
      service_id: rgw.foo
      service_type: ingress
      spec:
        backend_service: rgw.foo
        frontend_port: 9000
        monitor_port: 9001
        virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}'
- cephadm.wait_for_service:
    service: rgw.foo
- cephadm.wait_for_service:
    service: ingress.rgw.foo
- cephadm.shell:
    host.a:
    - |
      echo "Check while healthy..."
      curl http://{{VIP0}}:9000/

      # stop each rgw in turn
      echo "Check with each rgw stopped in turn..."
      for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
      done

      # stop each haproxy in turn
      echo "Check with each haproxy down in turn..."
      for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
      done

      timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/kshtsk/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-04-16_15:21:55
tube: vps
user: supriti
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.3072398
2026-04-16T19:12:38.601 INFO:teuthology.run:suite_path is set to /home/teuthos/src/git.local_ceph_909b66e106532fd1f1a49171c3f2eb7a193a6d0b/qa; will attempt to use it
2026-04-16T19:12:38.602 INFO:teuthology.run:Found tasks at /home/teuthos/src/git.local_ceph_909b66e106532fd1f1a49171c3f2eb7a193a6d0b/qa/tasks
2026-04-16T19:12:38.602 INFO:teuthology.run_tasks:Running task internal.save_config...
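Aside on the job config above: the two specs under cephadm.apply can also be fed to the orchestrator by hand with "ceph orch apply -i". A minimal sketch, with a hypothetical VIP 192.168.123.100/24 standing in for the {{VIP0}}/{{VIPPREFIXLEN}} placeholders that the vip task fills in at run time:

    # Sketch: apply the rgw + ingress specs from the job config manually.
    # 192.168.123.100/24 is a made-up VIP; substitute your own.
    cat <<'EOF' > /tmp/rgw-ingress.yaml
    service_type: rgw
    service_id: foo
    placement:
      count: 4
      host_pattern: '*'
    spec:
      rgw_frontend_port: 8000
    ---
    service_type: ingress
    service_id: rgw.foo
    placement:
      count: 2
    spec:
      backend_service: rgw.foo
      frontend_port: 9000
      monitor_port: 9001
      virtual_ip: 192.168.123.100/24
    EOF
    ceph orch apply -i /tmp/rgw-ingress.yaml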
2026-04-16T19:12:38.602 INFO:teuthology.task.internal:Saving configuration 2026-04-16T19:12:38.611 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-04-16T19:12:38.612 INFO:teuthology.task.internal.check_lock:Checking locks... 2026-04-16T19:12:38.619 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-16 19:11:23.339576', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJUddllhtNGRKrGP8Em9ts+l29jpGNlWaitWFI+W9mf41zKoJX6sKzN479i1hqvSqhqCki5hHVWhcHDy2i+z8vU='} 2026-04-16T19:12:38.627 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-16 19:11:23.340464', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEGtvRViXP64vTFzpa3KKMX3s3JZ66KkgvKwyKQtfuuMD5DEWBWLO9GgJF+NNNmMpMrR35eA3Zp1czZFpw5WNVs='} 2026-04-16T19:12:38.627 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-04-16T19:12:38.627 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['host.a', 'client.0'] 2026-04-16T19:12:38.627 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['host.b', 'client.1'] 2026-04-16T19:12:38.627 INFO:teuthology.run_tasks:Running task console_log... 2026-04-16T19:12:38.636 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding 2026-04-16T19:12:38.643 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding 2026-04-16T19:12:38.644 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fd2b4cec700>, signals=[15]) 2026-04-16T19:12:38.644 INFO:teuthology.run_tasks:Running task internal.connect... 2026-04-16T19:12:38.645 INFO:teuthology.task.internal:Opening connections... 2026-04-16T19:12:38.645 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local 2026-04-16T19:12:38.646 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60} 2026-04-16T19:12:38.708 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local 2026-04-16T19:12:38.709 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60} 2026-04-16T19:12:38.768 INFO:teuthology.run_tasks:Running task internal.push_inventory... 
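internal.push_inventory, which runs next, gathers the architecture and OS-release facts shown below and reports them to the lock server. The collection half amounts to roughly this (a sketch, assuming passwordless SSH as the ubuntu user):

    # Sketch: gather the arch/OS facts that push_inventory reads from each host.
    for host in vm01.local vm04.local; do
      arch=$(ssh ubuntu@"$host" uname -m)
      # /etc/os-release is a shell-sourceable key=value file
      eval "$(ssh ubuntu@"$host" cat /etc/os-release)"
      echo "$host: arch=$arch os=$ID $VERSION_ID ($VERSION_CODENAME)"
    done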
2026-04-16T19:12:38.769 DEBUG:teuthology.orchestra.run.vm01:> uname -m 2026-04-16T19:12:38.798 INFO:teuthology.orchestra.run.vm01.stdout:x86_64 2026-04-16T19:12:38.798 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release 2026-04-16T19:12:38.841 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS" 2026-04-16T19:12:38.841 INFO:teuthology.orchestra.run.vm01.stdout:NAME="Ubuntu" 2026-04-16T19:12:38.841 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="22.04" 2026-04-16T19:12:38.841 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)" 2026-04-16T19:12:38.841 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_CODENAME=jammy 2026-04-16T19:12:38.841 INFO:teuthology.orchestra.run.vm01.stdout:ID=ubuntu 2026-04-16T19:12:38.842 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE=debian 2026-04-16T19:12:38.842 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://www.ubuntu.com/" 2026-04-16T19:12:38.842 INFO:teuthology.orchestra.run.vm01.stdout:SUPPORT_URL="https://help.ubuntu.com/" 2026-04-16T19:12:38.842 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 2026-04-16T19:12:38.842 INFO:teuthology.orchestra.run.vm01.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 2026-04-16T19:12:38.842 INFO:teuthology.orchestra.run.vm01.stdout:UBUNTU_CODENAME=jammy 2026-04-16T19:12:38.842 INFO:teuthology.lock.ops:Updating vm01.local on lock server 2026-04-16T19:12:38.848 DEBUG:teuthology.orchestra.run.vm04:> uname -m 2026-04-16T19:12:38.861 INFO:teuthology.orchestra.run.vm04.stdout:x86_64 2026-04-16T19:12:38.861 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release 2026-04-16T19:12:38.906 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:NAME="Ubuntu" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="22.04" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_CODENAME=jammy 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:ID=ubuntu 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE=debian 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://www.ubuntu.com/" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:SUPPORT_URL="https://help.ubuntu.com/" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 2026-04-16T19:12:38.907 INFO:teuthology.orchestra.run.vm04.stdout:UBUNTU_CODENAME=jammy 2026-04-16T19:12:38.907 INFO:teuthology.lock.ops:Updating vm04.local on lock server 2026-04-16T19:12:38.913 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-04-16T19:12:38.915 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-04-16T19:12:38.916 INFO:teuthology.task.internal:Checking for old test directory... 2026-04-16T19:12:38.916 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest 2026-04-16T19:12:38.917 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest 2026-04-16T19:12:38.950 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 
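The /var/lib/ceph check that follows deserves a note: because the command substitution in "test -z $(ls -A /var/lib/ceph)" is unquoted, a missing directory yields an empty substitution and the test still passes, which is exactly what happens below despite the ls error on stderr. A quick illustration of the three cases:

    # Sketch: behaviour of the emptiness check used by internal.check_ceph_data.
    check() { test -z $(ls -A "$1" 2>/dev/null) && echo "$1: ok (absent or empty)" || echo "$1: non-empty"; }
    mkdir -p /tmp/empty.d && check /tmp/empty.d    # ok: ls prints nothing
    check /tmp/no-such-dir                         # ok: ls fails, substitution is empty
    mkdir -p /tmp/full.d && touch /tmp/full.d/x && check /tmp/full.d   # non-empty
    # with two or more entries, test errors out ("too many arguments") and the
    # non-zero status is still reported as non-empty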
2026-04-16T19:12:38.952 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-04-16T19:12:38.952 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph) 2026-04-16T19:12:38.964 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph) 2026-04-16T19:12:38.966 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-04-16T19:12:38.995 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-04-16T19:12:38.995 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-04-16T19:12:39.005 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready 2026-04-16T19:12:39.013 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-04-16T19:12:39.263 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready 2026-04-16T19:12:39.267 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-04-16T19:12:39.538 INFO:teuthology.run_tasks:Running task internal.base... 2026-04-16T19:12:39.539 INFO:teuthology.task.internal:Creating test directory... 2026-04-16T19:12:39.539 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-04-16T19:12:39.540 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-04-16T19:12:39.544 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-04-16T19:12:39.545 INFO:teuthology.run_tasks:Running task internal.archive... 2026-04-16T19:12:39.546 INFO:teuthology.task.internal:Creating archive directory... 2026-04-16T19:12:39.546 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-04-16T19:12:39.587 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-04-16T19:12:39.592 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-04-16T19:12:39.593 INFO:teuthology.task.internal:Enabling coredump saving... 
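The coredump setup that follows condenses to the script below (the same commands as in the log, with comments):

    # Sketch: what internal.coredump runs on each host, per the commands below.
    if ! test -f /run/.containerenv -o -f /.dockerenv; then   # skip inside containers
      coredir=/home/ubuntu/cephtest/archive/coredump
      install -d -m0755 -- "$coredir"
      # route kernel core dumps into the archive, named <epoch>.<pid>.core
      sudo sysctl -w kernel.core_pattern="$coredir/%t.%p.core"
      # persist the pattern across reboots
      echo "kernel.core_pattern=$coredir/%t.%p.core" | sudo tee -a /etc/sysctl.conf
    fi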
2026-04-16T19:12:39.593 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv 2026-04-16T19:12:39.633 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-04-16T19:12:39.633 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv 2026-04-16T19:12:39.636 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-04-16T19:12:39.636 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-04-16T19:12:39.675 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-04-16T19:12:39.683 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-04-16T19:12:39.688 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-04-16T19:12:39.689 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-04-16T19:12:39.697 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-04-16T19:12:39.698 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-04-16T19:12:39.700 INFO:teuthology.task.internal:Configuring sudo... 2026-04-16T19:12:39.700 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-04-16T19:12:39.731 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-04-16T19:12:39.749 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-04-16T19:12:39.752 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
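The syslog task below writes /etc/rsyslog.d/80-cephtest.conf via dd, but the file body itself is not echoed into this log. A plausible minimal drop-in matching the kern.log/misc.log files it pre-creates might look like the following (an assumption; the real teuthology template may differ):

    # Assumption: reconstructed drop-in; the contents piped into dd are not shown in this log.
    sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF'
    kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log
    *.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log
    EOF
    sudo service rsyslog restart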
2026-04-16T19:12:39.752 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-04-16T19:12:39.783 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-04-16T19:12:39.795 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-04-16T19:12:39.829 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-04-16T19:12:39.873 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-04-16T19:12:39.873 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-04-16T19:12:39.922 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-04-16T19:12:39.926 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-04-16T19:12:39.971 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-16T19:12:39.971 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-04-16T19:12:40.020 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart 2026-04-16T19:12:40.021 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart 2026-04-16T19:12:40.082 INFO:teuthology.run_tasks:Running task internal.timer... 2026-04-16T19:12:40.083 INFO:teuthology.task.internal:Starting timer... 2026-04-16T19:12:40.084 INFO:teuthology.run_tasks:Running task pcp... 2026-04-16T19:12:40.086 INFO:teuthology.run_tasks:Running task selinux... 2026-04-16T19:12:40.088 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported 2026-04-16T19:12:40.088 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported 2026-04-16T19:12:40.088 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-04-16T19:12:40.088 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-04-16T19:12:40.088 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-04-16T19:12:40.088 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
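The ansible.cephlab task below resolves its overrides, fetches ceph-cm-ansible, and assembles a single ansible-playbook invocation. Stripped of the logical_volumes/volume_groups extra-vars for brevity (the full JSON is visible in the log line below), it boils down to:

    # Sketch: the ansible-playbook call the task assembles; the -i inventory is a
    # throwaway file generated for this run.
    ansible-playbook -v \
      --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' \
      -i /tmp/teuth_ansible_inventory3wv5xlca \
      --limit vm01.local,vm04.local \
      /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml \
      --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs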
2026-04-16T19:12:40.090 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'logical_volumes': {'lv_1': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_2': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_3': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_4': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}}, 'timezone': 'UTC', 'volume_groups': {'vg_nvme': {'pvs': '/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde'}}}} 2026-04-16T19:12:40.090 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git 2026-04-16T19:12:40.091 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin 2026-04-16T19:12:40.915 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main 2026-04-16T19:12:40.921 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-04-16T19:12:40.922 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "logical_volumes": {"lv_1": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_2": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_3": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_4": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}}, "timezone": "UTC", "volume_groups": {"vg_nvme": {"pvs": "/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde"}}}' -i /tmp/teuth_ansible_inventory3wv5xlca --limit vm01.local,vm04.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-04-16T19:18:43.920 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm01.local'), Remote(name='ubuntu@vm04.local')] 2026-04-16T19:18:43.921 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm01.local' 2026-04-16T19:18:43.922 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60} 2026-04-16T19:18:43.985 DEBUG:teuthology.orchestra.run.vm01:> true 2026-04-16T19:18:44.061 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm01.local' 2026-04-16T19:18:44.062 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm04.local' 2026-04-16T19:18:44.062 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60} 2026-04-16T19:18:44.128 DEBUG:teuthology.orchestra.run.vm04:> true 2026-04-16T19:18:44.369 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm04.local' 2026-04-16T19:18:44.369 INFO:teuthology.run_tasks:Running task clock... 2026-04-16T19:18:44.372 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
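The clock task that follows runs one long best-effort pipeline on each host; broken out with comments, it reads:

    # Sketch: the clock-sync one-liner below, reformatted for readability.
    # Stop whichever time daemon is present (only one of these will succeed).
    sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service
    # Step the clock once: ntpd -gq sets it and exits; fall back to chrony.
    sudo ntpd -gq || sudo chronyc makestep
    # Restart the daemon for ongoing discipline.
    sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service
    # Report peers for the skew check; never fail the task on this step.
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true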
2026-04-16T19:18:44.372 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-04-16T19:18:44.373 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-04-16T19:18:44.374 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-04-16T19:18:44.374 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-04-16T19:18:44.391 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Command line: ntpd -gq 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: ---------------------------------------------------- 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: ntp-4 is maintained by Network Time Foundation, 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Inc. (NTF), a non-profit 501(c)(3) public-benefit 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: corporation. 
Support and training for ntp-4 are 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: available at https://www.nwtime.org/support 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: ---------------------------------------------------- 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: proto: precision = 0.029 usec (-25) 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: basedate set to 2022-02-04 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: gps base set to 2022-02-06 (week 2196) 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stderr:16 Apr 19:18:44 ntpd[16383]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 110 days ago 2026-04-16T19:18:44.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listen and drop on 0 v6wildcard [::]:123 2026-04-16T19:18:44.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listen and drop on 1 v4wildcard 0.0.0.0:123 2026-04-16T19:18:44.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listen normally on 2 lo 127.0.0.1:123 2026-04-16T19:18:44.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listen normally on 3 ens3 192.168.123.101:123 2026-04-16T19:18:44.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listen normally on 4 lo [::1]:123 2026-04-16T19:18:44.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:1%2]:123 2026-04-16T19:18:44.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:44 ntpd[16383]: Listening on routing socket on fd #22 for interface updates 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Command line: ntpd -gq 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: ---------------------------------------------------- 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: ntp-4 is maintained by Network Time Foundation, 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Inc. (NTF), a non-profit 501(c)(3) public-benefit 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: corporation. 
Support and training for ntp-4 are 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: available at https://www.nwtime.org/support 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: ---------------------------------------------------- 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: proto: precision = 0.029 usec (-25) 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: basedate set to 2022-02-04 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: gps base set to 2022-02-06 (week 2196) 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37 2026-04-16T19:18:44.428 INFO:teuthology.orchestra.run.vm04.stderr:16 Apr 19:18:44 ntpd[16299]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 110 days ago 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listen and drop on 0 v6wildcard [::]:123 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listen and drop on 1 v4wildcard 0.0.0.0:123 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listen normally on 2 lo 127.0.0.1:123 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listen normally on 3 ens3 192.168.123.104:123 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listen normally on 4 lo [::1]:123 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:4%2]:123 2026-04-16T19:18:44.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:44 ntpd[16299]: Listening on routing socket on fd #22 for interface updates 2026-04-16T19:18:45.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:45 ntpd[16383]: Soliciting pool server 176.9.44.212 2026-04-16T19:18:45.429 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:45 ntpd[16299]: Soliciting pool server 176.9.44.212 2026-04-16T19:18:46.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:46 ntpd[16383]: Soliciting pool server 185.232.69.65 2026-04-16T19:18:46.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:46 ntpd[16383]: Soliciting pool server 93.177.65.20 2026-04-16T19:18:46.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:46 ntpd[16299]: Soliciting pool server 185.232.69.65 2026-04-16T19:18:46.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:46 ntpd[16299]: Soliciting pool server 93.177.65.20 2026-04-16T19:18:47.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:47 ntpd[16383]: Soliciting pool server 194.59.205.229 2026-04-16T19:18:47.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:47 ntpd[16383]: Soliciting pool server 5.189.151.39 2026-04-16T19:18:47.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:47 ntpd[16383]: Soliciting pool server 81.3.27.46 2026-04-16T19:18:47.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:47 ntpd[16299]: Soliciting 
pool server 194.59.205.229 2026-04-16T19:18:47.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:47 ntpd[16299]: Soliciting pool server 5.189.151.39 2026-04-16T19:18:47.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:47 ntpd[16299]: Soliciting pool server 81.3.27.46 2026-04-16T19:18:48.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:48 ntpd[16383]: Soliciting pool server 148.251.235.164 2026-04-16T19:18:48.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:48 ntpd[16383]: Soliciting pool server 5.75.181.179 2026-04-16T19:18:48.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:48 ntpd[16383]: Soliciting pool server 94.16.122.152 2026-04-16T19:18:48.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:48 ntpd[16383]: Soliciting pool server 144.91.126.59 2026-04-16T19:18:48.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:48 ntpd[16299]: Soliciting pool server 148.251.235.164 2026-04-16T19:18:48.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:48 ntpd[16299]: Soliciting pool server 5.75.181.179 2026-04-16T19:18:48.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:48 ntpd[16299]: Soliciting pool server 94.16.122.152 2026-04-16T19:18:48.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:48 ntpd[16299]: Soliciting pool server 144.91.126.59 2026-04-16T19:18:49.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:49 ntpd[16383]: Soliciting pool server 185.16.60.96 2026-04-16T19:18:49.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:49 ntpd[16383]: Soliciting pool server 144.76.76.107 2026-04-16T19:18:49.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:49 ntpd[16383]: Soliciting pool server 5.45.97.204 2026-04-16T19:18:49.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:49 ntpd[16383]: Soliciting pool server 185.125.190.58 2026-04-16T19:18:49.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:49 ntpd[16299]: Soliciting pool server 185.16.60.96 2026-04-16T19:18:49.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:49 ntpd[16299]: Soliciting pool server 144.76.76.107 2026-04-16T19:18:49.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:49 ntpd[16299]: Soliciting pool server 5.45.97.204 2026-04-16T19:18:49.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:49 ntpd[16299]: Soliciting pool server 185.125.190.58 2026-04-16T19:18:50.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:50 ntpd[16383]: Soliciting pool server 91.189.91.157 2026-04-16T19:18:50.392 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:50 ntpd[16383]: Soliciting pool server 129.70.132.35 2026-04-16T19:18:50.393 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:50 ntpd[16383]: Soliciting pool server 37.221.199.157 2026-04-16T19:18:50.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:50 ntpd[16299]: Soliciting pool server 91.189.91.157 2026-04-16T19:18:50.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:50 ntpd[16299]: Soliciting pool server 129.70.132.35 2026-04-16T19:18:50.428 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:50 ntpd[16299]: Soliciting pool server 37.221.199.157 2026-04-16T19:18:53.421 INFO:teuthology.orchestra.run.vm01.stdout:16 Apr 19:18:53 ntpd[16383]: ntpd: time slew -0.002106 s 2026-04-16T19:18:53.421 INFO:teuthology.orchestra.run.vm01.stdout:ntpd: time slew -0.002106s 2026-04-16T19:18:53.447 INFO:teuthology.orchestra.run.vm01.stdout: remote refid st t when poll reach delay offset jitter 2026-04-16T19:18:53.447 
INFO:teuthology.orchestra.run.vm01.stdout:============================================================================== 2026-04-16T19:18:53.447 INFO:teuthology.orchestra.run.vm01.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:53.447 INFO:teuthology.orchestra.run.vm01.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:53.447 INFO:teuthology.orchestra.run.vm01.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:53.447 INFO:teuthology.orchestra.run.vm01.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:53.447 INFO:teuthology.orchestra.run.vm01.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:54.456 INFO:teuthology.orchestra.run.vm04.stdout:16 Apr 19:18:54 ntpd[16299]: ntpd: time slew +0.000139 s 2026-04-16T19:18:54.456 INFO:teuthology.orchestra.run.vm04.stdout:ntpd: time slew +0.000139s 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout: remote refid st t when poll reach delay offset jitter 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout:============================================================================== 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:54.476 INFO:teuthology.orchestra.run.vm04.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-16T19:18:54.476 INFO:teuthology.run_tasks:Running task nvme_loop... 2026-04-16T19:18:54.479 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices... 
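Each "Connecting nvme_loop" step below drives the kernel nvmet target through configfs and then attaches a loop-transport initiator. Condensed from the per-LV commands that follow:

    # Sketch: export one LV via the kernel NVMe-loop target, as the task does for each device below.
    dev=/dev/vg_nvme/lv_1; sub=lv_1
    sudo modprobe nvme_loop                       # nvmet core + loop transport
    sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn
    sudo mkdir -p /sys/kernel/config/nvmet/ports/1
    echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
    sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$sub
    echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$sub/attr_allow_any_host
    sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$sub/namespaces/1
    echo -n $dev | sudo tee /sys/kernel/config/nvmet/subsystems/$sub/namespaces/1/device_path
    echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$sub/namespaces/1/enable
    # expose the subsystem on the loop port, then connect to it as an initiator
    sudo ln -s /sys/kernel/config/nvmet/subsystems/$sub /sys/kernel/config/nvmet/ports/1/subsystems/$sub
    sudo nvme connect -t loop -n $sub -q hostnqn  # surfaces as /dev/nvmeXn1 (see lsblk below)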
2026-04-16T19:18:54.479 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-04-16T19:18:54.479 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout 2026-04-16T19:18:54.483 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4'] 2026-04-16T19:18:54.483 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_1 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_1 -> ../dm-0 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 793 Links: 1 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:15:46.398725240 +0000 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:15:46.274725240 +0000 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:15:46.274725240 +0000 2026-04-16T19:18:54.531 INFO:teuthology.orchestra.run.vm01.stdout: Birth: - 2026-04-16T19:18:54.531 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1 2026-04-16T19:18:54.580 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-16T19:18:54.580 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-16T19:18:54.580 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000152117 s, 3.4 MB/s 2026-04-16T19:18:54.581 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1 2026-04-16T19:18:54.628 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_2 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_2 -> ../dm-1 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 823 Links: 1 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:15:46.686725240 +0000 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:15:46.558725240 +0000 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:15:46.558725240 +0000 2026-04-16T19:18:54.675 INFO:teuthology.orchestra.run.vm01.stdout: Birth: - 2026-04-16T19:18:54.675 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1 2026-04-16T19:18:54.725 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-16T19:18:54.725 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-16T19:18:54.725 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000169562 s, 3.0 MB/s 2026-04-16T19:18:54.726 DEBUG:teuthology.orchestra.run.vm01:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2 2026-04-16T19:18:54.776 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_3 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_3 -> ../dm-2 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 855 Links: 1 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:15:46.978725240 +0000 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:15:46.842725240 +0000 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:15:46.842725240 +0000 2026-04-16T19:18:54.823 INFO:teuthology.orchestra.run.vm01.stdout: Birth: - 2026-04-16T19:18:54.823 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1 2026-04-16T19:18:54.872 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-16T19:18:54.872 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-16T19:18:54.872 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000159548 s, 3.2 MB/s 2026-04-16T19:18:54.873 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3 2026-04-16T19:18:54.920 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_4 2026-04-16T19:18:54.966 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_4 -> ../dm-3 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 884 Links: 1 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:18:40.086080305 +0000 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:15:47.154725240 +0000 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:15:47.154725240 +0000 2026-04-16T19:18:54.967 INFO:teuthology.orchestra.run.vm01.stdout: Birth: - 2026-04-16T19:18:54.967 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1 2026-04-16T19:18:55.015 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-16T19:18:55.021 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-16T19:18:55.021 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000142455 s, 3.6 MB/s 2026-04-16T19:18:55.022 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4 2026-04-16T19:18:55.067 DEBUG:teuthology.orchestra.run.vm01:> sudo apt install -y linux-modules-extra-$(uname -r) 2026-04-16T19:18:55.118 INFO:teuthology.orchestra.run.vm01.stderr: 2026-04-16T19:18:55.118 INFO:teuthology.orchestra.run.vm01.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts. 2026-04-16T19:18:55.118 INFO:teuthology.orchestra.run.vm01.stderr: 2026-04-16T19:18:55.144 INFO:teuthology.orchestra.run.vm01.stdout:Reading package lists... 2026-04-16T19:18:55.324 INFO:teuthology.orchestra.run.vm01.stdout:Building dependency tree... 
2026-04-16T19:18:55.324 INFO:teuthology.orchestra.run.vm01.stdout:Reading state information... 2026-04-16T19:18:55.451 INFO:teuthology.orchestra.run.vm01.stdout:The following packages were automatically installed and are no longer required: 2026-04-16T19:18:55.451 INFO:teuthology.orchestra.run.vm01.stdout: kpartx libsgutils2-2 sg3-utils sg3-utils-udev 2026-04-16T19:18:55.451 INFO:teuthology.orchestra.run.vm01.stdout:Use 'sudo apt autoremove' to remove them. 2026-04-16T19:18:55.452 INFO:teuthology.orchestra.run.vm01.stdout:The following additional packages will be installed: 2026-04-16T19:18:55.452 INFO:teuthology.orchestra.run.vm01.stdout: wireless-regdb 2026-04-16T19:18:55.493 INFO:teuthology.orchestra.run.vm01.stdout:The following NEW packages will be installed: 2026-04-16T19:18:55.493 INFO:teuthology.orchestra.run.vm01.stdout: linux-modules-extra-5.15.0-171-generic wireless-regdb 2026-04-16T19:18:55.524 INFO:teuthology.orchestra.run.vm01.stdout:0 upgraded, 2 newly installed, 0 to remove and 62 not upgraded. 2026-04-16T19:18:55.603 INFO:teuthology.orchestra.run.vm01.stdout:Need to get 63.9 MB of archives. 2026-04-16T19:18:55.603 INFO:teuthology.orchestra.run.vm01.stdout:After this operation, 353 MB of additional disk space will be used. 2026-04-16T19:18:55.604 INFO:teuthology.orchestra.run.vm01.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 wireless-regdb all 2025.10.07-0ubuntu1~22.04.1 [10.1 kB] 2026-04-16T19:18:55.611 INFO:teuthology.orchestra.run.vm01.stdout:Get:2 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 linux-modules-extra-5.15.0-171-generic amd64 5.15.0-171.181 [63.9 MB] 2026-04-16T19:18:56.357 INFO:teuthology.orchestra.run.vm01.stderr:debconf: unable to initialize frontend: Dialog 2026-04-16T19:18:56.357 INFO:teuthology.orchestra.run.vm01.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.) 2026-04-16T19:18:56.357 INFO:teuthology.orchestra.run.vm01.stderr:debconf: falling back to frontend: Readline 2026-04-16T19:18:56.362 INFO:teuthology.orchestra.run.vm01.stderr:debconf: unable to initialize frontend: Readline 2026-04-16T19:18:56.363 INFO:teuthology.orchestra.run.vm01.stderr:debconf: (This frontend requires a controlling tty.) 2026-04-16T19:18:56.363 INFO:teuthology.orchestra.run.vm01.stderr:debconf: falling back to frontend: Teletype 2026-04-16T19:18:56.366 INFO:teuthology.orchestra.run.vm01.stderr:dpkg-preconfigure: unable to re-open stdin: 2026-04-16T19:18:56.393 INFO:teuthology.orchestra.run.vm01.stdout:Fetched 63.9 MB in 1s (94.7 MB/s) 2026-04-16T19:18:56.477 INFO:teuthology.orchestra.run.vm01.stdout:Selecting previously unselected package wireless-regdb. 2026-04-16T19:18:56.515 INFO:teuthology.orchestra.run.vm01.stdout:(Reading database ... 119267 files and directories currently installed.)
2026-04-16T19:18:56.518 INFO:teuthology.orchestra.run.vm01.stdout:Preparing to unpack .../wireless-regdb_2025.10.07-0ubuntu1~22.04.1_all.deb ... 2026-04-16T19:18:56.519 INFO:teuthology.orchestra.run.vm01.stdout:Unpacking wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ... 2026-04-16T19:18:56.541 INFO:teuthology.orchestra.run.vm01.stdout:Selecting previously unselected package linux-modules-extra-5.15.0-171-generic. 2026-04-16T19:18:56.548 INFO:teuthology.orchestra.run.vm01.stdout:Preparing to unpack .../linux-modules-extra-5.15.0-171-generic_5.15.0-171.181_amd64.deb ... 2026-04-16T19:18:56.549 INFO:teuthology.orchestra.run.vm01.stdout:Unpacking linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-16T19:18:58.295 INFO:teuthology.orchestra.run.vm01.stdout:Setting up wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ... 2026-04-16T19:18:58.298 INFO:teuthology.orchestra.run.vm01.stdout:Setting up linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-16T19:18:59.628 INFO:teuthology.orchestra.run.vm01.stdout:Processing triggers for man-db (2.10.2-1) ... 2026-04-16T19:18:59.664 INFO:teuthology.orchestra.run.vm01.stdout:Processing triggers for linux-image-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-16T19:18:59.670 INFO:teuthology.orchestra.run.vm01.stdout:/etc/kernel/postinst.d/initramfs-tools: 2026-04-16T19:18:59.670 INFO:teuthology.orchestra.run.vm01.stdout:update-initramfs: Generating /boot/initrd.img-5.15.0-171-generic 2026-04-16T19:19:08.766 INFO:teuthology.orchestra.run.vm01.stdout:/etc/kernel/postinst.d/zz-update-grub: 2026-04-16T19:19:08.766 INFO:teuthology.orchestra.run.vm01.stdout:Sourcing file `/etc/default/grub' 2026-04-16T19:19:08.784 INFO:teuthology.orchestra.run.vm01.stdout:Sourcing file `/etc/default/grub.d/50-cloudimg-settings.cfg' 2026-04-16T19:19:08.785 INFO:teuthology.orchestra.run.vm01.stdout:Sourcing file `/etc/default/grub.d/init-select.cfg' 2026-04-16T19:19:08.786 INFO:teuthology.orchestra.run.vm01.stdout:Generating grub configuration file ... 2026-04-16T19:19:08.896 INFO:teuthology.orchestra.run.vm01.stdout:Found linux image: /boot/vmlinuz-5.15.0-171-generic 2026-04-16T19:19:08.901 INFO:teuthology.orchestra.run.vm01.stdout:Found initrd image: /boot/initrd.img-5.15.0-171-generic 2026-04-16T19:19:09.174 INFO:teuthology.orchestra.run.vm01.stdout:Warning: os-prober will not be executed to detect other bootable partitions. 2026-04-16T19:19:09.174 INFO:teuthology.orchestra.run.vm01.stdout:Systems on them will not be added to the GRUB boot configuration. 2026-04-16T19:19:09.174 INFO:teuthology.orchestra.run.vm01.stdout:Check GRUB_DISABLE_OS_PROBER documentation entry. 2026-04-16T19:19:09.182 INFO:teuthology.orchestra.run.vm01.stdout:done 2026-04-16T19:19:09.457 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:09.457 INFO:teuthology.orchestra.run.vm01.stdout:Running kernel seems to be up-to-date. 
2026-04-16T19:19:09.457 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:09.457 INFO:teuthology.orchestra.run.vm01.stdout:Services to be restarted: 2026-04-16T19:19:09.461 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart apache-htcacheclean.service 2026-04-16T19:19:09.466 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart rsyslog.service 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout:Service restarts being deferred: 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart networkd-dispatcher.service 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart unattended-upgrades.service 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout:No containers need to be restarted. 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout:No user sessions are running outdated binaries. 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:09.469 INFO:teuthology.orchestra.run.vm01.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-16T19:19:10.750 DEBUG:teuthology.orchestra.run.vm01:> sudo apt install -y nvme-cli 2026-04-16T19:19:10.799 INFO:teuthology.orchestra.run.vm01.stderr: 2026-04-16T19:19:10.799 INFO:teuthology.orchestra.run.vm01.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts. 2026-04-16T19:19:10.799 INFO:teuthology.orchestra.run.vm01.stderr: 2026-04-16T19:19:10.828 INFO:teuthology.orchestra.run.vm01.stdout:Reading package lists... 2026-04-16T19:19:11.027 INFO:teuthology.orchestra.run.vm01.stdout:Building dependency tree... 2026-04-16T19:19:11.028 INFO:teuthology.orchestra.run.vm01.stdout:Reading state information... 2026-04-16T19:19:11.187 INFO:teuthology.orchestra.run.vm01.stdout:The following packages were automatically installed and are no longer required: 2026-04-16T19:19:11.187 INFO:teuthology.orchestra.run.vm01.stdout: kpartx libsgutils2-2 sg3-utils sg3-utils-udev 2026-04-16T19:19:11.187 INFO:teuthology.orchestra.run.vm01.stdout:Use 'sudo apt autoremove' to remove them. 2026-04-16T19:19:11.239 INFO:teuthology.orchestra.run.vm01.stdout:The following NEW packages will be installed: 2026-04-16T19:19:11.239 INFO:teuthology.orchestra.run.vm01.stdout: nvme-cli 2026-04-16T19:19:11.271 INFO:teuthology.orchestra.run.vm01.stdout:0 upgraded, 1 newly installed, 0 to remove and 62 not upgraded. 2026-04-16T19:19:11.529 INFO:teuthology.orchestra.run.vm01.stdout:Need to get 474 kB of archives. 2026-04-16T19:19:11.529 INFO:teuthology.orchestra.run.vm01.stdout:After this operation, 1136 kB of additional disk space will be used. 2026-04-16T19:19:11.529 INFO:teuthology.orchestra.run.vm01.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 nvme-cli amd64 1.16-3ubuntu0.3 [474 kB] 2026-04-16T19:19:12.630 INFO:teuthology.orchestra.run.vm01.stderr:debconf: unable to initialize frontend: Dialog 2026-04-16T19:19:12.630 INFO:teuthology.orchestra.run.vm01.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.) 
2026-04-16T19:19:12.630 INFO:teuthology.orchestra.run.vm01.stderr:debconf: falling back to frontend: Readline 2026-04-16T19:19:12.635 INFO:teuthology.orchestra.run.vm01.stderr:debconf: unable to initialize frontend: Readline 2026-04-16T19:19:12.635 INFO:teuthology.orchestra.run.vm01.stderr:debconf: (This frontend requires a controlling tty.) 2026-04-16T19:19:12.635 INFO:teuthology.orchestra.run.vm01.stderr:debconf: falling back to frontend: Teletype 2026-04-16T19:19:12.638 INFO:teuthology.orchestra.run.vm01.stderr:dpkg-preconfigure: unable to re-open stdin: 2026-04-16T19:19:12.668 INFO:teuthology.orchestra.run.vm01.stdout:Fetched 474 kB in 1s (399 kB/s) 2026-04-16T19:19:12.687 INFO:teuthology.orchestra.run.vm01.stdout:Selecting previously unselected package nvme-cli. 2026-04-16T19:19:12.715 INFO:teuthology.orchestra.run.vm01.stdout:(Reading database ... 125173 files and directories currently installed.) 2026-04-16T19:19:12.716 INFO:teuthology.orchestra.run.vm01.stdout:Preparing to unpack .../nvme-cli_1.16-3ubuntu0.3_amd64.deb ... 2026-04-16T19:19:12.717 INFO:teuthology.orchestra.run.vm01.stdout:Unpacking nvme-cli (1.16-3ubuntu0.3) ... 2026-04-16T19:19:12.780 INFO:teuthology.orchestra.run.vm01.stdout:Setting up nvme-cli (1.16-3ubuntu0.3) ... 2026-04-16T19:19:12.849 INFO:teuthology.orchestra.run.vm01.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /lib/systemd/system/nvmefc-boot-connections.service. 2026-04-16T19:19:13.122 INFO:teuthology.orchestra.run.vm01.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmf-autoconnect.service → /lib/systemd/system/nvmf-autoconnect.service. 2026-04-16T19:19:13.523 INFO:teuthology.orchestra.run.vm01.stdout:nvmf-connect.target is a disabled or a static unit, not starting it. 2026-04-16T19:19:13.542 INFO:teuthology.orchestra.run.vm01.stdout:Processing triggers for man-db (2.10.2-1) ... 2026-04-16T19:19:13.859 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:13.860 INFO:teuthology.orchestra.run.vm01.stdout:Running kernel seems to be up-to-date.
2026-04-16T19:19:13.860 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:13.860 INFO:teuthology.orchestra.run.vm01.stdout:Services to be restarted: 2026-04-16T19:19:13.863 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart apache-htcacheclean.service 2026-04-16T19:19:13.869 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart rsyslog.service 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout:Service restarts being deferred: 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart networkd-dispatcher.service 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout: systemctl restart unattended-upgrades.service 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout:No containers need to be restarted. 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout:No user sessions are running outdated binaries. 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:19:13.872 INFO:teuthology.orchestra.run.vm01.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-16T19:19:15.173 DEBUG:teuthology.orchestra.run.vm01:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-04-16T19:19:15.247 INFO:teuthology.orchestra.run.vm01.stdout:loop 2026-04-16T19:19:15.247 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_1... 2026-04-16T19:19:15.248 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn 2026-04-16T19:19:15.304 INFO:teuthology.orchestra.run.vm01.stdout:1 2026-04-16T19:19:15.321 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_11 2026-04-16T19:19:15.337 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_2... 2026-04-16T19:19:15.337 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn 2026-04-16T19:19:15.397 INFO:teuthology.orchestra.run.vm01.stdout:1 2026-04-16T19:19:15.415 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_21 2026-04-16T19:19:15.431 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_3... 
2026-04-16T19:19:15.432 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn 2026-04-16T19:19:15.490 INFO:teuthology.orchestra.run.vm01.stdout:1 2026-04-16T19:19:15.508 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_31 2026-04-16T19:19:15.523 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_4... 2026-04-16T19:19:15.523 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn 2026-04-16T19:19:15.579 INFO:teuthology.orchestra.run.vm01.stdout:1 2026-04-16T19:19:15.595 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_41 2026-04-16T19:19:15.608 DEBUG:teuthology.orchestra.run.vm01:> lsblk 2026-04-16T19:19:15.660 INFO:teuthology.orchestra.run.vm01.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-04-16T19:19:15.660 INFO:teuthology.orchestra.run.vm01.stdout:loop0 7:0 0 63.8M 1 loop /snap/core20/2717 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:loop1 7:1 0 91.6M 1 loop /snap/lxd/37982 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:loop2 7:2 0 48.1M 1 loop /snap/snapd/25935 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:sr0 11:0 1 366K 0 rom 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:vda 252:0 0 40G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:├─vda1 252:1 0 39.9G 0 part / 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:├─vda14 252:14 0 4M 0 part 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:└─vda15 252:15 0 106M 0 part /boot/efi 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:vdb 252:16 0 20G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:vdc 252:32 0 20G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:vdd 252:48 0 20G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:vde 252:64 0 20G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:nvme0n1 259:1 0 20G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:nvme1n1 259:3 0 20G 0 disk 2026-04-16T19:19:15.661 
INFO:teuthology.orchestra.run.vm01.stdout:nvme2n1 259:5 0 20G 0 disk 2026-04-16T19:19:15.661 INFO:teuthology.orchestra.run.vm01.stdout:nvme3n1 259:7 0 20G 0 disk 2026-04-16T19:19:15.661 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme list -o json 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "Devices" : [ 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace" : 1, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath" : "/dev/nvme0n1", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "Index" : 0, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber" : "f0ab57676a82334da227", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize" : 512 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace" : 1, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath" : "/dev/nvme1n1", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "Index" : 1, 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber" : "498a3ba4a5f7d3afa78b", 2026-04-16T19:19:15.709 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize" : 512 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace" : 1, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath" : "/dev/nvme2n1", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "Index" : 2, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber" : "018d58381178c5541924", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize" : 512 2026-04-16T19:19:15.710 
INFO:teuthology.orchestra.run.vm01.stdout: }, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: { 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace" : 1, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath" : "/dev/nvme3n1", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "Index" : 3, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber" : "cb1f69ea567d81122781", 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize" : 512 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: } 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout: ] 2026-04-16T19:19:15.710 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-04-16T19:19:15.710 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-04-16T19:19:15.761 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:15.761 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:15.761 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00430257 s, 952 kB/s 2026-04-16T19:19:15.762 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme0n1 2026-04-16T19:19:15.810 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:15.810 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:15.810 INFO:teuthology.orchestra.run.vm01.stdout:00000016 2026-04-16T19:19:15.810 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:15.861 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:15.861 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:15.862 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00472412 s, 867 kB/s 2026-04-16T19:19:15.862 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-04-16T19:19:15.908 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:15.908 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:15.909 INFO:teuthology.orchestra.run.vm01.stdout:40000016 2026-04-16T19:19:15.909 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:15.961 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:15.961 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:15.961 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00421947 s, 971 kB/s 2026-04-16T19:19:15.961 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-04-16T19:19:16.013 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.013 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.013 INFO:teuthology.orchestra.run.vm01.stdout:280000016 2026-04-16T19:19:16.014 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-04-16T19:19:16.064 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.064 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.064 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00388798 s, 1.1 MB/s 2026-04-16T19:19:16.065 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-04-16T19:19:16.116 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.116 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.116 INFO:teuthology.orchestra.run.vm01.stdout:00000016 2026-04-16T19:19:16.116 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:16.171 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.172 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.172 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00553628 s, 740 kB/s 2026-04-16T19:19:16.173 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-16T19:19:16.223 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.223 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.223 INFO:teuthology.orchestra.run.vm01.stdout:40000016 2026-04-16T19:19:16.224 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:16.277 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.277 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.277 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00373797 s, 1.1 MB/s 2026-04-16T19:19:16.277 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-16T19:19:16.324 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.324 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.324 INFO:teuthology.orchestra.run.vm01.stdout:280000016 2026-04-16T19:19:16.325 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-16T19:19:16.377 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.377 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.377 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0045727 s, 896 kB/s 2026-04-16T19:19:16.377 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-16T19:19:16.429 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.429 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......| 
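(Annotation: each new namespace then gets the same spot-wipe seen above: 4 KiB of zeroes written at byte offsets 0, 1 GiB and 10 GiB, each read back with hexdump to confirm the device is writable across its range. bs=1 makes seek= address byte offsets directly. A sketch with a hypothetical wipe_check helper:

    wipe_check() {
        local dev=$1
        for off in 0 1073741824 10737418240; do   # 0, 1 GiB, 10 GiB
            sudo dd if=/dev/zero of="$dev" seek=$off bs=1 count=4096
            sudo hexdump -n22 -C -s$off "$dev"    # expect all zeroes
        done
    }
    wipe_check /dev/nvme0n1
)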
2026-04-16T19:19:16.429 INFO:teuthology.orchestra.run.vm01.stdout:00000016 2026-04-16T19:19:16.429 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:16.481 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.481 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.481 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00380052 s, 1.1 MB/s 2026-04-16T19:19:16.481 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-16T19:19:16.529 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.529 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.529 INFO:teuthology.orchestra.run.vm01.stdout:40000016 2026-04-16T19:19:16.530 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:16.581 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.581 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.581 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00405461 s, 1.0 MB/s 2026-04-16T19:19:16.582 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-04-16T19:19:16.632 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.632 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.632 INFO:teuthology.orchestra.run.vm01.stdout:280000016 2026-04-16T19:19:16.633 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-04-16T19:19:16.685 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.685 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.685 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00443286 s, 924 kB/s 2026-04-16T19:19:16.686 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-04-16T19:19:16.737 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.737 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.737 INFO:teuthology.orchestra.run.vm01.stdout:00000016 2026-04-16T19:19:16.737 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:16.789 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.789 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.789 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00431372 s, 950 kB/s 2026-04-16T19:19:16.790 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-04-16T19:19:16.841 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.841 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.841 INFO:teuthology.orchestra.run.vm01.stdout:40000016 2026-04-16T19:19:16.841 DEBUG:teuthology.orchestra.run.vm01:> sudo dd 
if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:16.893 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in 2026-04-16T19:19:16.893 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out 2026-04-16T19:19:16.893 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00435058 s, 941 kB/s 2026-04-16T19:19:16.893 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-04-16T19:19:16.944 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:16.945 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:16.945 INFO:teuthology.orchestra.run.vm01.stdout:280000016 2026-04-16T19:19:16.945 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-16T19:19:16.945 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-04-16T19:19:16.945 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/scratch_devs 2026-04-16T19:19:16.994 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-16T19:19:16.994 DEBUG:teuthology.orchestra.run.vm04:> dd if=/scratch_devs of=/dev/stdout 2026-04-16T19:19:16.997 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4'] 2026-04-16T19:19:16.997 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_1 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vg_nvme/lv_1 -> ../dm-0 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 783 Links: 1 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:18:35.129084737 +0000 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:18:34.989076846 +0000 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:18:34.989076846 +0000 2026-04-16T19:19:17.044 INFO:teuthology.orchestra.run.vm04.stdout: Birth: - 2026-04-16T19:19:17.044 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1 2026-04-16T19:19:17.091 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-16T19:19:17.091 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-16T19:19:17.091 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000164398 s, 3.1 MB/s 2026-04-16T19:19:17.092 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1 2026-04-16T19:19:17.140 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_2 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vg_nvme/lv_2 -> ../dm-1 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 813 Links: 1 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:18:35.405100247 +0000 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:18:35.273092829 +0000 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:18:35.273092829 +0000 2026-04-16T19:19:17.187 INFO:teuthology.orchestra.run.vm04.stdout: Birth: - 2026-04-16T19:19:17.187 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1 2026-04-16T19:19:17.235 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-16T19:19:17.235 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-16T19:19:17.235 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000157966 s, 3.2 MB/s 2026-04-16T19:19:17.235 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2 2026-04-16T19:19:17.280 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_3 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vg_nvme/lv_3 -> ../dm-2 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 846 Links: 1 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:18:35.721118004 +0000 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:18:35.589110586 +0000 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:18:35.589110586 +0000 2026-04-16T19:19:17.324 INFO:teuthology.orchestra.run.vm04.stdout: Birth: - 2026-04-16T19:19:17.324 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1 2026-04-16T19:19:17.375 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-16T19:19:17.375 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-16T19:19:17.375 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.00013901 s, 3.7 MB/s 2026-04-16T19:19:17.376 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3 2026-04-16T19:19:17.424 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vg_nvme/lv_4 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vg_nvme/lv_4 -> ../dm-3 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 875 Links: 1 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:18:39.685328673 +0000 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:18:35.881126995 +0000 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:18:35.881126995 +0000 2026-04-16T19:19:17.471 INFO:teuthology.orchestra.run.vm04.stdout: Birth: - 2026-04-16T19:19:17.472 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1 2026-04-16T19:19:17.520 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-16T19:19:17.520 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-16T19:19:17.520 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000146604 s, 3.5 MB/s 2026-04-16T19:19:17.521 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4 2026-04-16T19:19:17.569 DEBUG:teuthology.orchestra.run.vm04:> sudo apt install -y linux-modules-extra-$(uname -r) 2026-04-16T19:19:17.623 INFO:teuthology.orchestra.run.vm04.stderr: 2026-04-16T19:19:17.623 INFO:teuthology.orchestra.run.vm04.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts. 2026-04-16T19:19:17.623 INFO:teuthology.orchestra.run.vm04.stderr: 2026-04-16T19:19:17.649 INFO:teuthology.orchestra.run.vm04.stdout:Reading package lists... 2026-04-16T19:19:17.774 INFO:teuthology.orchestra.run.vm04.stdout:Building dependency tree... 2026-04-16T19:19:17.774 INFO:teuthology.orchestra.run.vm04.stdout:Reading state information... 2026-04-16T19:19:17.904 INFO:teuthology.orchestra.run.vm04.stdout:The following packages were automatically installed and are no longer required: 2026-04-16T19:19:17.906 INFO:teuthology.orchestra.run.vm04.stdout: kpartx libsgutils2-2 sg3-utils sg3-utils-udev 2026-04-16T19:19:17.906 INFO:teuthology.orchestra.run.vm04.stdout:Use 'sudo apt autoremove' to remove them. 2026-04-16T19:19:17.908 INFO:teuthology.orchestra.run.vm04.stdout:The following additional packages will be installed: 2026-04-16T19:19:17.908 INFO:teuthology.orchestra.run.vm04.stdout: wireless-regdb 2026-04-16T19:19:17.961 INFO:teuthology.orchestra.run.vm04.stdout:The following NEW packages will be installed: 2026-04-16T19:19:17.961 INFO:teuthology.orchestra.run.vm04.stdout: linux-modules-extra-5.15.0-171-generic wireless-regdb 2026-04-16T19:19:18.007 INFO:teuthology.orchestra.run.vm04.stdout:0 upgraded, 2 newly installed, 0 to remove and 62 not upgraded. 2026-04-16T19:19:18.117 INFO:teuthology.orchestra.run.vm04.stdout:Need to get 63.9 MB of archives. 2026-04-16T19:19:18.117 INFO:teuthology.orchestra.run.vm04.stdout:After this operation, 353 MB of additional disk space will be used. 
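(Annotation: the lv_4 check above completes vm04's scratch-device preflight — each LV must stat as a device node, yield one readable sector, and be absent from the mount table before it is handed to the test. Condensed into a sketch with a hypothetical check_scratch helper:

    check_scratch() {
        local dev=$1
        stat "$dev"                                 # node exists
        sudo dd if="$dev" of=/dev/null count=1      # one sector is readable
        ! mount | grep -v devtmpfs | grep -q "$dev" # not mounted anywhere
    }
    for lv in /dev/vg_nvme/lv_{1..4}; do check_scratch "$lv"; done
)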
2026-04-16T19:19:18.117 INFO:teuthology.orchestra.run.vm04.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 wireless-regdb all 2025.10.07-0ubuntu1~22.04.1 [10.1 kB] 2026-04-16T19:19:18.154 INFO:teuthology.orchestra.run.vm04.stdout:Get:2 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 linux-modules-extra-5.15.0-171-generic amd64 5.15.0-171.181 [63.9 MB] 2026-04-16T19:19:19.569 INFO:teuthology.orchestra.run.vm04.stderr:debconf: unable to initialize frontend: Dialog 2026-04-16T19:19:19.569 INFO:teuthology.orchestra.run.vm04.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.) 2026-04-16T19:19:19.569 INFO:teuthology.orchestra.run.vm04.stderr:debconf: falling back to frontend: Readline 2026-04-16T19:19:19.575 INFO:teuthology.orchestra.run.vm04.stderr:debconf: unable to initialize frontend: Readline 2026-04-16T19:19:19.575 INFO:teuthology.orchestra.run.vm04.stderr:debconf: (This frontend requires a controlling tty.) 2026-04-16T19:19:19.575 INFO:teuthology.orchestra.run.vm04.stderr:debconf: falling back to frontend: Teletype 2026-04-16T19:19:19.577 INFO:teuthology.orchestra.run.vm04.stderr:dpkg-preconfigure: unable to re-open stdin: 2026-04-16T19:19:19.617 INFO:teuthology.orchestra.run.vm04.stdout:Fetched 63.9 MB in 1s (46.7 MB/s) 2026-04-16T19:19:19.709 INFO:teuthology.orchestra.run.vm04.stdout:Selecting previously unselected package wireless-regdb. 2026-04-16T19:19:19.733 INFO:teuthology.orchestra.run.vm04.stdout:(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 119267 files and directories currently installed.) 2026-04-16T19:19:19.734 INFO:teuthology.orchestra.run.vm04.stdout:Preparing to unpack .../wireless-regdb_2025.10.07-0ubuntu1~22.04.1_all.deb ... 2026-04-16T19:19:19.736 INFO:teuthology.orchestra.run.vm04.stdout:Unpacking wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ... 2026-04-16T19:19:19.755 INFO:teuthology.orchestra.run.vm04.stdout:Selecting previously unselected package linux-modules-extra-5.15.0-171-generic. 2026-04-16T19:19:19.759 INFO:teuthology.orchestra.run.vm04.stdout:Preparing to unpack .../linux-modules-extra-5.15.0-171-generic_5.15.0-171.181_amd64.deb ... 2026-04-16T19:19:19.760 INFO:teuthology.orchestra.run.vm04.stdout:Unpacking linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-16T19:19:21.559 INFO:teuthology.orchestra.run.vm04.stdout:Setting up wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ... 2026-04-16T19:19:21.561 INFO:teuthology.orchestra.run.vm04.stdout:Setting up linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-16T19:19:22.926 INFO:teuthology.orchestra.run.vm04.stdout:Processing triggers for man-db (2.10.2-1) ... 2026-04-16T19:19:22.966 INFO:teuthology.orchestra.run.vm04.stdout:Processing triggers for linux-image-5.15.0-171-generic (5.15.0-171.181) ... 
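(Annotation: the linux-modules-extra install above matters because Ubuntu cloud kernels typically ship nvme_loop in that package rather than in the base module set, so the task installs the build matching the running kernel before modprobing. A sketch of the dependency, using the same guard the task runs later:

    # Ensure nvme_loop is available for the running kernel, then load it.
    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
        "linux-modules-extra-$(uname -r)"
    grep -q '^nvme_loop' /proc/modules || sudo modprobe nvme_loop
)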
2026-04-16T19:19:22.972 INFO:teuthology.orchestra.run.vm04.stdout:/etc/kernel/postinst.d/initramfs-tools: 2026-04-16T19:19:22.972 INFO:teuthology.orchestra.run.vm04.stdout:update-initramfs: Generating /boot/initrd.img-5.15.0-171-generic 2026-04-16T19:19:32.275 INFO:teuthology.orchestra.run.vm04.stdout:/etc/kernel/postinst.d/zz-update-grub: 2026-04-16T19:19:32.275 INFO:teuthology.orchestra.run.vm04.stdout:Sourcing file `/etc/default/grub' 2026-04-16T19:19:32.294 INFO:teuthology.orchestra.run.vm04.stdout:Sourcing file `/etc/default/grub.d/50-cloudimg-settings.cfg' 2026-04-16T19:19:32.295 INFO:teuthology.orchestra.run.vm04.stdout:Sourcing file `/etc/default/grub.d/init-select.cfg' 2026-04-16T19:19:32.296 INFO:teuthology.orchestra.run.vm04.stdout:Generating grub configuration file ... 2026-04-16T19:19:32.382 INFO:teuthology.orchestra.run.vm04.stdout:Found linux image: /boot/vmlinuz-5.15.0-171-generic 2026-04-16T19:19:32.387 INFO:teuthology.orchestra.run.vm04.stdout:Found initrd image: /boot/initrd.img-5.15.0-171-generic 2026-04-16T19:19:32.606 INFO:teuthology.orchestra.run.vm04.stdout:Warning: os-prober will not be executed to detect other bootable partitions. 2026-04-16T19:19:32.606 INFO:teuthology.orchestra.run.vm04.stdout:Systems on them will not be added to the GRUB boot configuration. 2026-04-16T19:19:32.607 INFO:teuthology.orchestra.run.vm04.stdout:Check GRUB_DISABLE_OS_PROBER documentation entry. 2026-04-16T19:19:32.614 INFO:teuthology.orchestra.run.vm04.stdout:done 2026-04-16T19:19:32.883 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:32.883 INFO:teuthology.orchestra.run.vm04.stdout:Running kernel seems to be up-to-date. 2026-04-16T19:19:32.883 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:32.883 INFO:teuthology.orchestra.run.vm04.stdout:Services to be restarted: 2026-04-16T19:19:32.886 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart apache-htcacheclean.service 2026-04-16T19:19:32.892 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart rsyslog.service 2026-04-16T19:19:32.895 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout:Service restarts being deferred: 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart networkd-dispatcher.service 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart unattended-upgrades.service 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout:No containers need to be restarted. 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout:No user sessions are running outdated binaries. 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:32.896 INFO:teuthology.orchestra.run.vm04.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-16T19:19:34.067 DEBUG:teuthology.orchestra.run.vm04:> sudo apt install -y nvme-cli 2026-04-16T19:19:34.120 INFO:teuthology.orchestra.run.vm04.stderr: 2026-04-16T19:19:34.121 INFO:teuthology.orchestra.run.vm04.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts. 2026-04-16T19:19:34.121 INFO:teuthology.orchestra.run.vm04.stderr: 2026-04-16T19:19:34.148 INFO:teuthology.orchestra.run.vm04.stdout:Reading package lists... 2026-04-16T19:19:34.330 INFO:teuthology.orchestra.run.vm04.stdout:Building dependency tree... 
2026-04-16T19:19:34.331 INFO:teuthology.orchestra.run.vm04.stdout:Reading state information... 2026-04-16T19:19:34.586 INFO:teuthology.orchestra.run.vm04.stdout:The following packages were automatically installed and are no longer required: 2026-04-16T19:19:34.587 INFO:teuthology.orchestra.run.vm04.stdout: kpartx libsgutils2-2 sg3-utils sg3-utils-udev 2026-04-16T19:19:34.587 INFO:teuthology.orchestra.run.vm04.stdout:Use 'sudo apt autoremove' to remove them. 2026-04-16T19:19:34.642 INFO:teuthology.orchestra.run.vm04.stdout:The following NEW packages will be installed: 2026-04-16T19:19:34.642 INFO:teuthology.orchestra.run.vm04.stdout: nvme-cli 2026-04-16T19:19:34.680 INFO:teuthology.orchestra.run.vm04.stdout:0 upgraded, 1 newly installed, 0 to remove and 62 not upgraded. 2026-04-16T19:19:34.995 INFO:teuthology.orchestra.run.vm04.stdout:Need to get 474 kB of archives. 2026-04-16T19:19:34.996 INFO:teuthology.orchestra.run.vm04.stdout:After this operation, 1136 kB of additional disk space will be used. 2026-04-16T19:19:34.996 INFO:teuthology.orchestra.run.vm04.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 nvme-cli amd64 1.16-3ubuntu0.3 [474 kB] 2026-04-16T19:19:35.184 INFO:teuthology.orchestra.run.vm04.stderr:debconf: unable to initialize frontend: Dialog 2026-04-16T19:19:35.184 INFO:teuthology.orchestra.run.vm04.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.) 2026-04-16T19:19:35.184 INFO:teuthology.orchestra.run.vm04.stderr:debconf: falling back to frontend: Readline 2026-04-16T19:19:35.189 INFO:teuthology.orchestra.run.vm04.stderr:debconf: unable to initialize frontend: Readline 2026-04-16T19:19:35.189 INFO:teuthology.orchestra.run.vm04.stderr:debconf: (This frontend requires a controlling tty.) 2026-04-16T19:19:35.189 INFO:teuthology.orchestra.run.vm04.stderr:debconf: falling back to frontend: Teletype 2026-04-16T19:19:35.192 INFO:teuthology.orchestra.run.vm04.stderr:dpkg-preconfigure: unable to re-open stdin: 2026-04-16T19:19:35.223 INFO:teuthology.orchestra.run.vm04.stdout:Fetched 474 kB in 0s (1510 kB/s) 2026-04-16T19:19:35.466 INFO:teuthology.orchestra.run.vm04.stdout:Selecting previously unselected package nvme-cli. 2026-04-16T19:19:35.508 INFO:teuthology.orchestra.run.vm04.stdout:(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 125173 files and directories currently installed.) 2026-04-16T19:19:35.511 INFO:teuthology.orchestra.run.vm04.stdout:Preparing to unpack .../nvme-cli_1.16-3ubuntu0.3_amd64.deb ... 2026-04-16T19:19:35.513 INFO:teuthology.orchestra.run.vm04.stdout:Unpacking nvme-cli (1.16-3ubuntu0.3) ... 2026-04-16T19:19:35.580 INFO:teuthology.orchestra.run.vm04.stdout:Setting up nvme-cli (1.16-3ubuntu0.3) ... 2026-04-16T19:19:35.669 INFO:teuthology.orchestra.run.vm04.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /lib/systemd/system/nvmefc-boot-connections.service. 
2026-04-16T19:19:35.947 INFO:teuthology.orchestra.run.vm04.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmf-autoconnect.service → /lib/systemd/system/nvmf-autoconnect.service. 2026-04-16T19:19:36.327 INFO:teuthology.orchestra.run.vm04.stdout:nvmf-connect.target is a disabled or a static unit, not starting it. 2026-04-16T19:19:36.344 INFO:teuthology.orchestra.run.vm04.stdout:Processing triggers for man-db (2.10.2-1) ... 2026-04-16T19:19:36.671 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:36.671 INFO:teuthology.orchestra.run.vm04.stdout:Running kernel seems to be up-to-date. 2026-04-16T19:19:36.671 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:36.671 INFO:teuthology.orchestra.run.vm04.stdout:Services to be restarted: 2026-04-16T19:19:36.674 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart apache-htcacheclean.service 2026-04-16T19:19:36.681 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart rsyslog.service 2026-04-16T19:19:36.684 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:36.684 INFO:teuthology.orchestra.run.vm04.stdout:Service restarts being deferred: 2026-04-16T19:19:36.684 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart networkd-dispatcher.service 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout: systemctl restart unattended-upgrades.service 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout:No containers need to be restarted. 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout:No user sessions are running outdated binaries. 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:19:36.685 INFO:teuthology.orchestra.run.vm04.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-16T19:19:38.063 DEBUG:teuthology.orchestra.run.vm04:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-04-16T19:19:38.136 INFO:teuthology.orchestra.run.vm04.stdout:loop 2026-04-16T19:19:38.136 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_1... 2026-04-16T19:19:38.136 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn 2026-04-16T19:19:38.191 INFO:teuthology.orchestra.run.vm04.stdout:1 2026-04-16T19:19:38.208 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_11 2026-04-16T19:19:38.226 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_2... 
2026-04-16T19:19:38.226 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn 2026-04-16T19:19:38.284 INFO:teuthology.orchestra.run.vm04.stdout:1 2026-04-16T19:19:38.302 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_21 2026-04-16T19:19:38.320 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_3... 2026-04-16T19:19:38.320 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn 2026-04-16T19:19:38.378 INFO:teuthology.orchestra.run.vm04.stdout:1 2026-04-16T19:19:38.393 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_31 2026-04-16T19:19:38.409 INFO:tasks.nvme_loop:Connecting nvme_loop vm04:/dev/vg_nvme/lv_4... 2026-04-16T19:19:38.409 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn 2026-04-16T19:19:38.470 INFO:teuthology.orchestra.run.vm04.stdout:1 2026-04-16T19:19:38.486 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vg_nvme/lv_41 2026-04-16T19:19:38.501 DEBUG:teuthology.orchestra.run.vm04:> lsblk 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:loop0 7:0 0 63.8M 1 loop /snap/core20/2717 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:loop1 7:1 0 91.6M 1 loop /snap/lxd/37982 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:loop2 7:2 0 48.1M 1 loop /snap/snapd/25935 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:sr0 11:0 1 366K 0 rom 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:vda 252:0 0 40G 0 disk 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:├─vda1 252:1 0 39.9G 0 part / 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:├─vda14 252:14 0 4M 0 part 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:└─vda15 252:15 0 106M 0 part /boot/efi 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:vdb 252:16 0 20G 0 disk 
2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:vdc 252:32 0 20G 0 disk 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:vdd 252:48 0 20G 0 disk 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:vde 252:64 0 20G 0 disk 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm 2026-04-16T19:19:38.551 INFO:teuthology.orchestra.run.vm04.stdout:nvme0n1 259:1 0 20G 0 disk 2026-04-16T19:19:38.552 INFO:teuthology.orchestra.run.vm04.stdout:nvme1n1 259:3 0 20G 0 disk 2026-04-16T19:19:38.552 INFO:teuthology.orchestra.run.vm04.stdout:nvme2n1 259:5 0 20G 0 disk 2026-04-16T19:19:38.552 INFO:teuthology.orchestra.run.vm04.stdout:nvme3n1 259:7 0 20G 0 disk 2026-04-16T19:19:38.552 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme list -o json 2026-04-16T19:19:38.601 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-04-16T19:19:38.601 INFO:teuthology.orchestra.run.vm04.stdout: "Devices" : [ 2026-04-16T19:19:38.601 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "NameSpace" : 1, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "DevicePath" : "/dev/nvme0n1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Index" : 0, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SerialNumber" : "29b2bfa9f2fa89bb2491", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SectorSize" : 512 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "NameSpace" : 1, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "DevicePath" : "/dev/nvme1n1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Index" : 1, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SerialNumber" : "61c157a7b2d028903b27", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SectorSize" : 512 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "NameSpace" : 1, 
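(Annotation: the new_devs list logged after these checks is derived from the `nvme list -o json` output above; with the nvme-cli 1.x schema the paths sit under Devices[].DevicePath. A one-liner sketch, assuming jq is installed — the task does the equivalent parsing in Python:

    sudo nvme list -o json | jq -r '.Devices[].DevicePath'
    # -> /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1
)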
2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "DevicePath" : "/dev/nvme2n1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Index" : 2, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SerialNumber" : "c8bfb12ab4da82fe6601", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SectorSize" : 512 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "NameSpace" : 1, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "DevicePath" : "/dev/nvme3n1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Firmware" : "5.15.0-1", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "Index" : 3, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "ModelNumber" : "Linux", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SerialNumber" : "5c94f58d64ed1622992e", 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "UsedBytes" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "MaximumLBA" : 41934848, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "PhysicalSize" : 21470642176, 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: "SectorSize" : 512 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-04-16T19:19:38.602 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-04-16T19:19:38.603 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-04-16T19:19:38.658 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:38.658 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:38.658 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00479224 s, 855 kB/s 2026-04-16T19:19:38.660 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme0n1 2026-04-16T19:19:38.710 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:38.710 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:38.710 INFO:teuthology.orchestra.run.vm04.stdout:00000016 2026-04-16T19:19:38.710 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:38.766 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:38.766 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:38.766 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00387101 s, 1.1 MB/s 2026-04-16T19:19:38.767 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-04-16T19:19:38.822 INFO:teuthology.orchestra.run.vm04.stdout:40000000 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:38.822 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:38.822 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-16T19:19:38.823 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:38.878 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:38.878 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:38.879 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00446737 s, 917 kB/s 2026-04-16T19:19:38.879 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-04-16T19:19:38.929 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:38.929 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:38.929 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-16T19:19:38.930 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-04-16T19:19:38.984 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:38.985 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:38.985 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00359469 s, 1.1 MB/s 2026-04-16T19:19:38.985 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-04-16T19:19:39.036 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.036 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.036 INFO:teuthology.orchestra.run.vm04.stdout:00000016 2026-04-16T19:19:39.037 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:39.093 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.093 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.093 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00359585 s, 1.1 MB/s 2026-04-16T19:19:39.094 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-16T19:19:39.145 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.145 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.145 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-16T19:19:39.146 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:39.218 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.218 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.218 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0213698 s, 192 kB/s 2026-04-16T19:19:39.219 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-16T19:19:39.268 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.268 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 
00 00 00 00 |......| 2026-04-16T19:19:39.268 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-16T19:19:39.269 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-16T19:19:39.327 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.327 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.327 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00904669 s, 453 kB/s 2026-04-16T19:19:39.327 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-16T19:19:39.377 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.377 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.377 INFO:teuthology.orchestra.run.vm04.stdout:00000016 2026-04-16T19:19:39.378 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:39.434 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.435 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.435 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0045436 s, 901 kB/s 2026-04-16T19:19:39.435 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-16T19:19:39.485 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.485 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.485 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-16T19:19:39.486 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:39.541 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.541 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.541 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00414865 s, 987 kB/s 2026-04-16T19:19:39.542 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-04-16T19:19:39.592 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.592 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.592 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-16T19:19:39.593 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-04-16T19:19:39.649 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.649 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.649 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00370005 s, 1.1 MB/s 2026-04-16T19:19:39.650 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-04-16T19:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.705 INFO:teuthology.orchestra.run.vm04.stdout:00000016 2026-04-16T19:19:39.706 DEBUG:teuthology.orchestra.run.vm04:> sudo dd 
if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-04-16T19:19:39.761 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.761 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.761 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00386327 s, 1.1 MB/s 2026-04-16T19:19:39.762 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-04-16T19:19:39.816 INFO:teuthology.orchestra.run.vm04.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.817 INFO:teuthology.orchestra.run.vm04.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.817 INFO:teuthology.orchestra.run.vm04.stdout:40000016 2026-04-16T19:19:39.817 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-04-16T19:19:39.875 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records in 2026-04-16T19:19:39.875 INFO:teuthology.orchestra.run.vm04.stderr:4096+0 records out 2026-04-16T19:19:39.875 INFO:teuthology.orchestra.run.vm04.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00497334 s, 824 kB/s 2026-04-16T19:19:39.876 DEBUG:teuthology.orchestra.run.vm04:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-04-16T19:19:39.928 INFO:teuthology.orchestra.run.vm04.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-16T19:19:39.929 INFO:teuthology.orchestra.run.vm04.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-16T19:19:39.929 INFO:teuthology.orchestra.run.vm04.stdout:280000016 2026-04-16T19:19:39.929 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-16T19:19:39.929 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-16T19:19:39.929 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/scratch_devs 2026-04-16T19:19:39.985 INFO:teuthology.run_tasks:Running task cephadm... 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': 'c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8', 'cephadm_binary_url': 'https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7'}} 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Cluster fsid is 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-04-16T19:19:40.031 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Monitor IPs: {'mon.vm01': '192.168.123.101', 'mon.vm04': '192.168.123.104'} 2026-04-16T19:19:40.031 INFO:tasks.cephadm:Normalizing hostnames... 
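The dd/hexdump pairs above are the nvme_loop task's wipe-and-verify pass: 4 KiB of zeros is written at byte offsets 0, 1 GiB (1073741824) and 10 GiB (10737418240) on each loop-backed namespace, and each write is read back with hexdump -n22 -C to confirm the old signature is gone. A minimal sketch of the same pattern (device list and offsets taken from the log; raw device writes need root, so only point this at disposable test devices):

    import subprocess

    # Byte offsets probed in the log: start of device, 1 GiB, 10 GiB.
    OFFSETS = [0, 1 << 30, 10 * (1 << 30)]
    DEVICES = ["/dev/nvme0n1", "/dev/nvme1n1", "/dev/nvme2n1", "/dev/nvme3n1"]

    def wipe_and_verify(dev):
        for off in OFFSETS:
            # bs=1 makes seek= a byte offset, exactly as in the logged commands.
            subprocess.run(["dd", "if=/dev/zero", f"of={dev}",
                            f"seek={off}", "bs=1", "count=4096"], check=True)
            dump = subprocess.run(["hexdump", "-n22", "-C", "-s", str(off), dev],
                                  check=True, capture_output=True, text=True).stdout
            # Every byte column in the hexdump must read 00 once the span is wiped.
            for line in dump.splitlines():
                tokens = line.split("|")[0].split()[1:]   # drop offset, keep hex bytes
                assert all(t == "00" for t in tokens), f"{dev} not zeroed at {off}"

    for dev in DEVICES:
        wipe_and_verify(dev)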
2026-04-16T19:19:40.031 DEBUG:teuthology.orchestra.run.vm01:> sudo hostname $(hostname -s) 2026-04-16T19:19:40.041 DEBUG:teuthology.orchestra.run.vm04:> sudo hostname $(hostname -s) 2026-04-16T19:19:40.051 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm 2026-04-16T19:19:40.051 DEBUG:teuthology.orchestra.run.vm01:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-04-16T19:19:41.243 INFO:teuthology.orchestra.run.vm01.stdout:-rw-rw-r-- 1 ubuntu ubuntu 1036391 Apr 16 19:19 /home/ubuntu/cephtest/cephadm 2026-04-16T19:19:41.244 DEBUG:teuthology.orchestra.run.vm04:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-04-16T19:19:42.398 INFO:teuthology.orchestra.run.vm04.stdout:-rw-rw-r-- 1 ubuntu ubuntu 1036391 Apr 16 19:19 /home/ubuntu/cephtest/cephadm 2026-04-16T19:19:42.398 DEBUG:teuthology.orchestra.run.vm01:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-04-16T19:19:42.404 DEBUG:teuthology.orchestra.run.vm04:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-04-16T19:19:42.411 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 on all hosts... 2026-04-16T19:19:42.411 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 pull 2026-04-16T19:19:42.452 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 pull 2026-04-16T19:19:42.733 INFO:teuthology.orchestra.run.vm01.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7... 2026-04-16T19:19:42.750 INFO:teuthology.orchestra.run.vm04.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7... 
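Each host fetches the standalone cephadm script with curl -L, then refuses to proceed unless the file is non-empty and larger than 1000 bytes before marking it executable; a cheap guard against an HTML error page landing where the binary should be. Roughly the same fetch-and-verify in Python (URL and destination path copied from the log):

    import os
    import stat
    import urllib.request

    URL = "https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm"
    DEST = "/home/ubuntu/cephtest/cephadm"

    urllib.request.urlretrieve(URL, DEST)   # follows redirects, like `curl -L`
    # Same guard as the logged `test -s ... && test $(stat -c%s ...) -gt 1000`.
    assert os.path.getsize(DEST) > 1000, "cephadm download looks truncated"
    # chmod +x
    os.chmod(DEST, os.stat(DEST).st_mode |
             stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)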
2026-04-16T19:20:02.149 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-04-16T19:20:02.150 INFO:teuthology.orchestra.run.vm04.stdout: "ceph_version": "ceph version 20.2.0-21-gc03ba9ecf58 (c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8) tentacle (stable)",
2026-04-16T19:20:02.150 INFO:teuthology.orchestra.run.vm04.stdout: "image_id": "fc41d50a3963b1f80069c41228698fd9d125d9478206ca50fa68bb35558d951e",
2026-04-16T19:20:02.150 INFO:teuthology.orchestra.run.vm04.stdout: "repo_digests": [
2026-04-16T19:20:02.150 INFO:teuthology.orchestra.run.vm04.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:fc41d50a3963b1f80069c41228698fd9d125d9478206ca50fa68bb35558d951e"
2026-04-16T19:20:02.150 INFO:teuthology.orchestra.run.vm04.stdout: ]
2026-04-16T19:20:02.150 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout: "ceph_version": "ceph version 20.2.0-21-gc03ba9ecf58 (c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8) tentacle (stable)",
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout: "image_id": "fc41d50a3963b1f80069c41228698fd9d125d9478206ca50fa68bb35558d951e",
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout: "repo_digests": [
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:fc41d50a3963b1f80069c41228698fd9d125d9478206ca50fa68bb35558d951e"
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout: ]
2026-04-16T19:20:03.747 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-04-16T19:20:03.771 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /etc/ceph
2026-04-16T19:20:03.779 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /etc/ceph
2026-04-16T19:20:03.789 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 777 /etc/ceph
2026-04-16T19:20:03.832 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 777 /etc/ceph
2026-04-16T19:20:03.845 INFO:tasks.cephadm:Writing seed config...
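The pull summaries above are enough to pin the cluster to an immutable reference: the repo digest identifies the actual pulled image, while a tag can be repointed. A hypothetical helper that captures that summary and extracts the digest (it assumes `cephadm pull` keeps printing this JSON, and only this JSON, on stdout):

    import json
    import subprocess

    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = ("harbor.clyso.com/custom-ceph/ceph/ceph:"
             "sse-s3-kmip-preview-not-for-production-7")

    out = subprocess.run(["sudo", CEPHADM, "--image", IMAGE, "pull"],
                         check=True, capture_output=True, text=True).stdout
    info = json.loads(out)
    print(info["ceph_version"])       # "ceph version 20.2.0-21-g..."
    print(info["repo_digests"][0])    # immutable @sha256:... reference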
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-04-16T19:20:03.845 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-04-16T19:20:03.846 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:20:03.846 DEBUG:teuthology.orchestra.run.vm01:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-04-16T19:20:03.878 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = 3711bb6a-39c9-11f1-9688-8928648d55a6

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
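Note how the per-daemon overrides from the job YAML end up appended to the matching sections of the seed config, and that the file reaches the remote by being streamed over stdin into `dd of=.../seed.ceph.conf` rather than copied. A sketch of that last step under the same assumption (conf content piped to dd; override values from the log):

    import subprocess

    overrides = {
        "mgr": {"debug mgr": "20", "debug ms": "1"},
        "mon": {"debug mon": "20", "debug ms": "1", "debug paxos": "20"},
        "osd": {"debug ms": "1", "debug osd": "20",
                "osd mclock iops capacity threshold hdd": "49000",
                "osd shutdown pgref assert": "true"},
    }
    conf = "".join(
        f"[{section}]\n" + "".join(f"{k} = {v}\n" for k, v in opts.items())
        for section, opts in overrides.items()
    )
    # Mirrors the logged `dd of=/home/ubuntu/cephtest/seed.ceph.conf`
    # with the config arriving on stdin.
    subprocess.run(["dd", "of=/home/ubuntu/cephtest/seed.ceph.conf"],
                   input=conf, text=True, check=True)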
2026-04-16T19:20:03.879 DEBUG:teuthology.orchestra.run.vm01:mon.vm01> sudo journalctl -f -n 0 -u ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01.service 2026-04-16T19:20:03.920 INFO:tasks.cephadm:Bootstrapping... 2026-04-16T19:20:03.920 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 -v bootstrap --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.101 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-04-16T19:20:04.245 INFO:teuthology.orchestra.run.vm01.stdout:-------------------------------------------------------------------------------- 2026-04-16T19:20:04.245 INFO:teuthology.orchestra.run.vm01.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7', '-v', 'bootstrap', '--fsid', '3711bb6a-39c9-11f1-9688-8928648d55a6', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.101', '--skip-admin-label'] 2026-04-16T19:20:04.245 INFO:teuthology.orchestra.run.vm01.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-04-16T19:20:04.246 INFO:teuthology.orchestra.run.vm01.stdout:Verifying podman|docker is present... 2026-04-16T19:20:04.246 INFO:teuthology.orchestra.run.vm01.stdout:Verifying lvm2 is present... 2026-04-16T19:20:04.246 INFO:teuthology.orchestra.run.vm01.stdout:Verifying time synchronization is in place... 
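Flag for flag, the bootstrap call is reproducible from the log: the seed config carries the debug overrides in, the --output-* options control where the admin config, keyring and SSH public key land, and --skip-admin-label leaves host labelling to the test itself. As a Python invocation (every value copied verbatim from the logged command, so this is specific to this run):

    import subprocess

    FSID = "3711bb6a-39c9-11f1-9688-8928648d55a6"
    IMAGE = ("harbor.clyso.com/custom-ceph/ceph/ceph:"
             "sse-s3-kmip-preview-not-for-production-7")

    cmd = [
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "-v", "bootstrap",
        "--fsid", FSID,
        "--config", "/home/ubuntu/cephtest/seed.ceph.conf",
        "--output-config", "/etc/ceph/ceph.conf",
        "--output-keyring", "/etc/ceph/ceph.client.admin.keyring",
        "--output-pub-ssh-key", "/home/ubuntu/cephtest/ceph.pub",
        "--mon-ip", "192.168.123.101",
        "--skip-admin-label",
    ]
    subprocess.run(cmd, check=True)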
2026-04-16T19:20:04.249 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-04-16T19:20:04.249 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-04-16T19:20:04.252 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-04-16T19:20:04.252 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.255 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chronyd.service 2026-04-16T19:20:04.255 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chronyd.service: No such file or directory 2026-04-16T19:20:04.257 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chronyd.service 2026-04-16T19:20:04.257 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.260 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled systemd-timesyncd.service 2026-04-16T19:20:04.260 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout masked 2026-04-16T19:20:04.263 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active systemd-timesyncd.service 2026-04-16T19:20:04.263 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.265 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled ntpd.service 2026-04-16T19:20:04.265 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for ntpd.service: No such file or directory 2026-04-16T19:20:04.268 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active ntpd.service 2026-04-16T19:20:04.268 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.272 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout enabled 2026-04-16T19:20:04.274 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout active 2026-04-16T19:20:04.274 INFO:teuthology.orchestra.run.vm01.stdout:Unit ntp.service is enabled and running 2026-04-16T19:20:04.274 INFO:teuthology.orchestra.run.vm01.stdout:Repeating the final host check... 
2026-04-16T19:20:04.275 INFO:teuthology.orchestra.run.vm01.stdout:docker (/usr/bin/docker) is present 2026-04-16T19:20:04.275 INFO:teuthology.orchestra.run.vm01.stdout:systemctl is present 2026-04-16T19:20:04.275 INFO:teuthology.orchestra.run.vm01.stdout:lvcreate is present 2026-04-16T19:20:04.277 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-04-16T19:20:04.277 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-04-16T19:20:04.280 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-04-16T19:20:04.280 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.282 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chronyd.service 2026-04-16T19:20:04.282 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chronyd.service: No such file or directory 2026-04-16T19:20:04.285 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chronyd.service 2026-04-16T19:20:04.285 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.288 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled systemd-timesyncd.service 2026-04-16T19:20:04.288 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout masked 2026-04-16T19:20:04.291 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active systemd-timesyncd.service 2026-04-16T19:20:04.291 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.294 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled ntpd.service 2026-04-16T19:20:04.294 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for ntpd.service: No such file or directory 2026-04-16T19:20:04.297 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active ntpd.service 2026-04-16T19:20:04.297 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-04-16T19:20:04.301 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout enabled 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout active 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Unit ntp.service is enabled and running 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Host looks OK 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Cluster fsid: 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Acquiring lock 140365378020704 on /run/cephadm/3711bb6a-39c9-11f1-9688-8928648d55a6.lock 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Lock 140365378020704 acquired on /run/cephadm/3711bb6a-39c9-11f1-9688-8928648d55a6.lock 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Verifying IP 192.168.123.101 port 3300 ... 2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Verifying IP 192.168.123.101 port 6789 ... 
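The noisy is-enabled/is-active probes above are cephadm walking a fixed candidate list of time-sync units until one is both enabled and active; on this Ubuntu 22.04 host chrony, chronyd, systemd-timesyncd (masked) and ntpd all fail before ntp.service passes, both in the first pass and in the repeated final host check. A sketch of the same probe, in the order the log shows (not cephadm's actual code):

    import subprocess

    UNITS = ["chrony.service", "chronyd.service", "systemd-timesyncd.service",
             "ntpd.service", "ntp.service"]

    def first_active_timesync():
        for unit in UNITS:
            enabled = subprocess.run(["systemctl", "is-enabled", unit],
                                     capture_output=True, text=True)
            active = subprocess.run(["systemctl", "is-active", unit],
                                    capture_output=True, text=True)
            if (enabled.stdout.strip() == "enabled"
                    and active.stdout.strip() == "active"):
                return unit          # the log settles on ntp.service
        return None

    print(first_active_timesync() or "no time-sync daemon found")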
2026-04-16T19:20:04.304 INFO:teuthology.orchestra.run.vm01.stdout:Base mon IP(s) is [192.168.123.101:3300, 192.168.123.101:6789], mon addrv is [v2:192.168.123.101:3300,v1:192.168.123.101:6789] 2026-04-16T19:20:04.306 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.101 metric 100 2026-04-16T19:20:04.306 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-04-16T19:20:04.306 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout 192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.101 metric 100 2026-04-16T19:20:04.306 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout 192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.101 metric 100 2026-04-16T19:20:04.308 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-04-16T19:20:04.308 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout fe80::/64 dev ens3 proto kernel metric 256 pref medium 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout inet6 ::1/128 scope host 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout 2: ens3: mtu 1500 state UP qlen 1000 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout inet6 fe80::5055:ff:fe00:1/64 scope link 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:/usr/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-04-16T19:20:04.309 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24` 2026-04-16T19:20:04.310 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24` 2026-04-16T19:20:04.310 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.1/32` 2026-04-16T19:20:04.310 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.1/32` 2026-04-16T19:20:04.310 INFO:teuthology.orchestra.run.vm01.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24', '192.168.123.1/32', '192.168.123.1/32'] 2026-04-16T19:20:04.310 INFO:teuthology.orchestra.run.vm01.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-04-16T19:20:04.310 INFO:teuthology.orchestra.run.vm01.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7... 
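Bootstrap derives a dual-protocol mon addrvec (msgr2 on 3300, legacy v1 on 6789) from the single --mon-ip, then infers the public network by testing the mon IP against the locally routed prefixes, which is why 192.168.123.0/24 shows up while the docker0 network does not. The membership test is straightforward with the standard ipaddress module; note that a strict containment check drops the 192.168.123.1/32 host route, so cephadm evidently also matches routes through their `src 192.168.123.101` attribute, which is how the /32 entries land in its inferred list:

    import ipaddress

    mon_ip = ipaddress.ip_address("192.168.123.101")
    addrv = f"[v2:{mon_ip}:3300,v1:{mon_ip}:6789]"   # matches the logged mon addrv

    # Prefixes from the logged `ip route` output.
    routes = ["192.168.123.0/24", "172.17.0.0/16", "192.168.123.1/32"]
    public = [n for n in routes if mon_ip in ipaddress.ip_network(n)]
    print(addrv)
    print(public)   # ['192.168.123.0/24'] -- strict containment excludes the /32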
2026-04-16T19:20:04.702 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/docker: stdout sse-s3-kmip-preview-not-for-production-7: Pulling from custom-ceph/ceph/ceph 2026-04-16T19:20:04.702 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/docker: stdout Digest: sha256:fc41d50a3963b1f80069c41228698fd9d125d9478206ca50fa68bb35558d951e 2026-04-16T19:20:04.702 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/docker: stdout Status: Image is up to date for harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 2026-04-16T19:20:04.702 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/docker: stdout harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 2026-04-16T19:20:04.982 INFO:teuthology.orchestra.run.vm01.stdout:ceph: stdout ceph version 20.2.0-21-gc03ba9ecf58 (c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8) tentacle (stable) 2026-04-16T19:20:04.982 INFO:teuthology.orchestra.run.vm01.stdout:Ceph version: ceph version 20.2.0-21-gc03ba9ecf58 (c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8) tentacle (stable) 2026-04-16T19:20:04.983 INFO:teuthology.orchestra.run.vm01.stdout:Extracting ceph user uid/gid from container image... 2026-04-16T19:20:05.082 INFO:teuthology.orchestra.run.vm01.stdout:stat: stdout 167 167 2026-04-16T19:20:05.082 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial keys... 2026-04-16T19:20:05.209 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQBlNuFp/OT0ChAATLDFiS7x3NQenzqV3KGoBg== 2026-04-16T19:20:05.320 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQBlNuFpjNZFERAADs4dFVvzgLu0q6p0WzeSIA== 2026-04-16T19:20:05.449 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQBlNuFpMm4HGBAAra5mY6LfB6aXVSoYrFoeVQ== 2026-04-16T19:20:05.449 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial monmap... 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = tentacle 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:monmaptool for vm01 [v2:192.168.123.101:3300,v1:192.168.123.101:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:setting min_mon_release = tentacle 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: set fsid to 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:20:05.566 INFO:teuthology.orchestra.run.vm01.stdout:Creating mon... 
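Key generation (ceph-authtool) and monmap creation (monmaptool) happen inside the container before the first mon process starts; the logged monmaptool run writes an epoch-0 map with min_mon_release = tentacle and a single entry for vm01. A hedged reconstruction of the equivalent direct invocation (the exact flags are an assumption, since the log only shows the tool's output, but --create, --fsid and --addv are the documented way to build such a map):

    import subprocess

    FSID = "3711bb6a-39c9-11f1-9688-8928648d55a6"
    ADDRV = "[v2:192.168.123.101:3300,v1:192.168.123.101:6789]"

    # Assumed flag set: create an epoch-0 monmap holding one mon, as in the log.
    subprocess.run([
        "monmaptool", "--create",
        "--fsid", FSID,
        "--addv", "vm01", ADDRV,
        "/tmp/monmap",
    ], check=True)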
2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 0 set uid:gid to 167:167 (ceph:ceph) 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 1 imported monmap: 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr epoch 0 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr last_changed 2026-04-16T19:20:05.536485+0000 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr created 2026-04-16T19:20:05.536485+0000 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr min_mon_release 20 (tentacle) 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr election_strategy: 1 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 0 /usr/bin/ceph-mon: set fsid to 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: RocksDB version: 7.9.2 2026-04-16T19:20:05.751 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.752 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Git sha 0 2026-04-16T19:20:05.752 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Compile date 2026-04-16 08:50:14 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: DB SUMMARY 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: DB Session ID: XFO436AB4TY963842JBS 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm01/store.db dir, Total Num: 0, files: 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm01/store.db: 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.error_if_exists: 0 2026-04-16T19:20:05.755 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.create_if_missing: 1 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.paranoid_checks: 1 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.flush_verify_memtable_count: 1 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.env: 0x556ea4761440 2026-04-16T19:20:05.755 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.fs: PosixFileSystem 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.info_log: 0x556eb40adc20 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.max_file_opening_threads: 16 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.statistics: (nil) 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.use_fsync: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.max_log_file_size: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.max_manifest_file_size: 1073741824 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.log_file_time_to_roll: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.keep_log_file_num: 1000 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.recycle_log_file_num: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.allow_fallocate: 1 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.allow_mmap_reads: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.allow_mmap_writes: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 
2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.use_direct_reads: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.create_missing_column_families: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.db_log_dir: 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.wal_dir: 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.table_cache_numshardbits: 6 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.WAL_ttl_seconds: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.702+0000 7f613e1bcd40 4 rocksdb: Options.WAL_size_limit_MB: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.manifest_preallocation_size: 4194304 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.is_fd_close_on_exec: 1 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.advise_random_on_open: 1 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.db_write_buffer_size: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.write_buffer_manager: 0x556eb40a0780 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.access_hint_on_compaction_start: 1 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.random_access_max_buffer_size: 1048576 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.use_adaptive_mutex: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.rate_limiter: (nil) 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 
2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.wal_recovery_mode: 2 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.enable_thread_tracking: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.enable_pipelined_write: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.unordered_write: 0 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.allow_concurrent_memtable_write: 1 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.write_thread_max_yield_usec: 100 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.write_thread_slow_yield_usec: 3 2026-04-16T19:20:05.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.row_cache: None 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.wal_filter: None 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.avoid_flush_during_recovery: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.allow_ingest_behind: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.two_write_queues: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.manual_wal_flush: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.wal_compression: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.atomic_flush: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.persist_stats_to_disk: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.write_dbid_to_manifest: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: 
Options.log_readahead_size: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.file_checksum_gen_factory: Unknown 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.best_efforts_recovery: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.allow_data_in_errors: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.db_host_id: __hostname__ 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.enforce_single_del_contracts: true 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_background_jobs: 2 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_background_compactions: -1 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_subcompactions: 1 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.avoid_flush_during_shutdown: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.delayed_write_rate : 16777216 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_total_wal_size: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.stats_dump_period_sec: 600 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.stats_persist_period_sec: 600 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.stats_history_buffer_size: 1048576 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 
2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_open_files: -1 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bytes_per_sync: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.wal_bytes_per_sync: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.strict_bytes_per_sync: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_readahead_size: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_background_flushes: -1 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Compression algorithms supported: 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kZSTD supported: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kXpressCompression supported: 0 2026-04-16T19:20:05.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kBZip2Compression supported: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kZSTDNotFinalCompression supported: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kLZ4Compression supported: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kZlibCompression supported: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kLZ4HCCompression supported: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: kSnappyCompression supported: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Fast CRC32 supported: Supported on x86 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: DMutex implementation: pthread_mutex_t 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/db_impl/db_impl_open.cc:317] Creating manifest 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: 
/var/lib/ceph/mon/ceph-vm01/store.db/MANIFEST-000001 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.merge_operator: 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_filter: None 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_filter_factory: None 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.sst_partitioner_factory: None 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.memtable_factory: SkipListFactory 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.table_factory: BlockBasedTable 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x556eb409dc20) 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr cache_index_and_filter_blocks: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr cache_index_and_filter_blocks_with_high_priority: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr pin_l0_filter_and_index_blocks_in_cache: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr pin_top_level_index_and_filter: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr index_type: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr data_block_index_type: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr index_shortening: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr data_block_hash_table_util_ratio: 0.750000 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr checksum: 4 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr no_block_cache: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_cache: 0x556eb40938d0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_cache_name: BinnedLRUCache 2026-04-16T19:20:05.758 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_cache_options: 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr capacity : 536870912 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr num_shard_bits : 4 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr strict_capacity_limit : 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr high_pri_pool_ratio: 0.000 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_cache_compressed: (nil) 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr persistent_cache: (nil) 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_size: 4096 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_size_deviation: 10 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_restart_interval: 16 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr index_block_restart_interval: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr metadata_block_size: 4096 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr partition_filters: 0 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr use_delta_encoding: 1 2026-04-16T19:20:05.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr filter_policy: bloomfilter 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr whole_key_filtering: 1 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr verify_compression: 0 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr read_amp_bytes_per_bit: 0 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr format_version: 5 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr enable_index_compression: 1 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr block_align: 0 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr max_auto_readahead_size: 262144 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr prepopulate_block_cache: 0 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr initial_auto_readahead_size: 8192 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr num_file_reads_for_auto_readahead: 2 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.write_buffer_size: 33554432 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_write_buffer_number: 2 2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression: NoCompression 
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression: Disabled
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.prefix_extractor: nullptr
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.num_levels: 7
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.level: 32767
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.enabled: false
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.window_bits: -14
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.level: 32767
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.strategy: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.parallel_threads: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.enabled: false
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.level0_stop_writes_trigger: 36
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.target_file_size_base: 67108864
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.target_file_size_multiplier: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_base: 268435456
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_compaction_bytes: 1677721600
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.arena_block_size: 1048576
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-04-16T19:20:05.759 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.disable_auto_compactions: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.inplace_update_support: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.inplace_update_num_locks: 10000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.memtable_whole_key_filtering: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.memtable_huge_page_size: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.bloom_locality: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.max_successive_merges: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.optimize_filters_for_hits: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.paranoid_file_checks: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.force_consistency_checks: 1
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.report_bg_io_stats: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.ttl: 2592000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.periodic_compaction_seconds: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.preclude_last_level_data_seconds: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.preserve_internal_time_seconds: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.enable_blob_files: false
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.min_blob_size: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.blob_file_size: 268435456
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.blob_compression_type: NoCompression
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.enable_blob_garbage_collection: false
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.blob_compaction_readahead_size: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.blob_file_starting_level: 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm01/store.db/MANIFEST-000001 succeeded,manifest_file_number is 1, next_file_number is 3, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4dd56a05-2869-47e0-9acb-29a00f15c32a
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.706+0000 7f613e1bcd40 4 rocksdb: [db/version_set.cc:5047] Creating manifest 5
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.710+0000 7f613e1bcd40 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x556eb40bee00
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.710+0000 7f613e1bcd40 4 rocksdb: DB pointer 0x556eb41e8000
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.710+0000 7f6135946640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.710+0000 7f6135946640 4 rocksdb: [db/db_impl/db_impl.cc:1111]
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr ** DB Stats **
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Uptime(secs): 0.0 total, 0.0 interval
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr ** Compaction Stats [default] **
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-16T19:20:05.760 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr ** Compaction Stats [default] **
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Uptime(secs): 0.0 total, 0.0 interval
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Flush(GB): cumulative 0.000, interval 0.000
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr AddFile(GB): cumulative 0.000, interval 0.000
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr AddFile(Total Files): cumulative 0, interval 0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr AddFile(L0 Files): cumulative 0, interval 0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr AddFile(Keys): cumulative 0, interval 0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Block cache BinnedLRUCache@0x556eb40938d0#7 capacity: 512.00 MB usage: 0.00 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 2.9e-05 secs_since: 0
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr Block cache entry stats(count,size,portion): Misc(1,0.00 KB,0%)
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr ** File Read Latency Histogram By Level [default] **
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.714+0000 7f613e1bcd40 4 rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.714+0000 7f613e1bcd40 4 rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-16T19:20:05.714+0000 7f613e1bcd40 0 /usr/bin/ceph-mon: created monfs at /var/lib/ceph/mon/ceph-vm01 for mon.vm01
2026-04-16T19:20:05.761 INFO:teuthology.orchestra.run.vm01.stdout:create mon.vm01 on
2026-04-16T19:20:06.133 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-04-16T19:20:06.344 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6.target → /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6.target.
2026-04-16T19:20:06.344 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6.target → /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6.target.
2026-04-16T19:20:06.548 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01
2026-04-16T19:20:06.548 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to reset failed state of unit ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01.service: Unit ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01.service not loaded.
2026-04-16T19:20:06.733 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6.target.wants/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01.service → /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service.
2026-04-16T19:20:06.743 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present
2026-04-16T19:20:06.743 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to enable service . firewalld.service is not available
2026-04-16T19:20:06.743 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mon to start...
2026-04-16T19:20:06.743 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mon...
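Note on the non-zero exit above: `systemctl reset-failed` fails here only because the freshly created unit has never been loaded, so there is no failed state to clear; the deploy logs the error and continues. A minimal Python sketch of that tolerate-and-continue pattern, assuming only that systemctl is on PATH (the helper name is ours, not cephadm's):

    import subprocess

    def reset_failed_unit(unit: str) -> None:
        # Clear any stale failed state before (re)starting the unit.
        # "Unit ... not loaded." exits non-zero on a fresh host; that is
        # expected, so log it and carry on instead of raising.
        res = subprocess.run(
            ["systemctl", "reset-failed", unit],
            capture_output=True,
            text=True,
        )
        if res.returncode != 0:
            print(f"Non-zero exit code {res.returncode} "
                  f"from systemctl reset-failed {unit}")

    reset_failed_unit("ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01")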
2026-04-16T19:20:06.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:06 vm01 bash[27719]: cluster 2026-04-16T19:20:06.874861+0000 mon.vm01 (mon.0) 1 : cluster [INF] mon.vm01 is new leader, mons vm01 in quorum (ranks 0)
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout cluster:
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout id: 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout services:
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm01 (age 0.154242s) [leader: vm01]
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout data:
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout pgs:
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:mon is available
2026-04-16T19:20:07.107 INFO:teuthology.orchestra.run.vm01.stdout:Assimilating anything we can from ceph.conf...
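The "Waiting for mon..." / "mon is available" exchange above is a poll loop: keep running `ceph status` against the new monitor until it answers. A rough Python equivalent, assuming a usable admin keyring and `ceph` on PATH (illustrative, not cephadm's actual implementation):

    import json
    import subprocess
    import time

    def wait_for_mon(timeout: float = 60.0) -> dict:
        # Poll `ceph status` until the monitor responds or we time out.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            res = subprocess.run(
                ["ceph", "status", "--format", "json"],
                capture_output=True,
                text=True,
            )
            if res.returncode == 0:
                return json.loads(res.stdout)
            time.sleep(1)
        raise TimeoutError("mon did not become available")

    status = wait_for_mon()
    print(status["health"]["status"])  # e.g. HEALTH_OK, as in the status dump above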
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [global]
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout fsid = 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789]
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [mgr]
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [osd]
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-04-16T19:20:07.463 INFO:teuthology.orchestra.run.vm01.stdout:Generating new minimal ceph.conf...
2026-04-16T19:20:07.790 INFO:teuthology.orchestra.run.vm01.stdout:Restarting the monitor...
2026-04-16T19:20:07.989 INFO:teuthology.orchestra.run.vm01.stdout:Setting public_network to 192.168.123.0/24,192.168.123.1/32 in global config section
2026-04-16T19:20:08.008 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 systemd[1]: Stopping Ceph mon.vm01 for 3711bb6a-39c9-11f1-9688-8928648d55a6...
2026-04-16T19:20:08.008 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 bash[27719]: debug 2026-04-16T19:20:07.834+0000 7f05a1112640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm01 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-04-16T19:20:08.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 bash[27719]: debug 2026-04-16T19:20:07.834+0000 7f05a1112640 -1 mon.vm01@0(leader) e1 *** Got Signal Terminated ***
2026-04-16T19:20:08.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 bash[28125]: ceph-3711bb6a-39c9-11f1-9688-8928648d55a6-mon-vm01
2026-04-16T19:20:08.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 systemd[1]: ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01.service: Deactivated successfully.
2026-04-16T19:20:08.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 systemd[1]: Stopped Ceph mon.vm01 for 3711bb6a-39c9-11f1-9688-8928648d55a6.
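The assimilate/regenerate/restart sequence above corresponds to three ceph CLI steps: absorb the existing ceph.conf into the mon config database, emit a minimal ceph.conf in its place, and record the public network. A sketch of the equivalent commands in Python, with the conf path an assumption (cephadm drives these steps internally):

    import subprocess

    CONF = "/etc/ceph/ceph.conf"  # illustrative path

    # Absorb whatever the old conf contained into the config database.
    subprocess.run(["ceph", "config", "assimilate-conf", "-i", CONF], check=True)

    # Replace the file with a minimal conf (essentially fsid + mon_host).
    minimal = subprocess.run(
        ["ceph", "config", "generate-minimal-conf"],
        capture_output=True, text=True, check=True,
    ).stdout
    with open(CONF, "w") as f:
        f.write(minimal)

    # Record the public network in the global config section.
    subprocess.run(
        ["ceph", "config", "set", "global", "public_network",
         "192.168.123.0/24,192.168.123.1/32"],
        check=True,
    )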
2026-04-16T19:20:08.009 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:07 vm01 systemd[1]: Started Ceph mon.vm01 for 3711bb6a-39c9-11f1-9688-8928648d55a6.
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.122+0000 7f7e96cc8d40 0 set uid:gid to 167:167 (ceph:ceph)
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.122+0000 7f7e96cc8d40 0 ceph version 20.2.0-21-gc03ba9ecf58 (c03ba9ecf58a4116bdd5049c6e392c7a287bc4f8) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 7
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.122+0000 7f7e96cc8d40 0 pidfile_write: ignore empty --pid-file
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 0 load: jerasure load: lrc
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: RocksDB version: 7.9.2
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Git sha 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Compile date 2026-04-16 08:50:14
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: DB SUMMARY
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: DB Session ID: 006HW1SPGN22EDNE11JY
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: CURRENT file: CURRENT
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: IDENTITY file: IDENTITY
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm01/store.db dir, Total Num: 1, files: 000008.sst
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm01/store.db: 000009.log size: 76257 ;
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.error_if_exists: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.create_if_missing: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.paranoid_checks: 1
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.flush_verify_memtable_count: 1
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.env: 0x558ca73d3440
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.fs: PosixFileSystem
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.info_log: 0x558cdb6e5f00
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_file_opening_threads: 16
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.statistics: (nil)
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.use_fsync: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_log_file_size: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_manifest_file_size: 1073741824
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.log_file_time_to_roll: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.keep_log_file_num: 1000
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.recycle_log_file_num: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.allow_fallocate: 1
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.allow_mmap_reads: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.allow_mmap_writes: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.use_direct_reads: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.create_missing_column_families: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.db_log_dir:
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.wal_dir:
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.table_cache_numshardbits: 6
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.WAL_ttl_seconds: 0
2026-04-16T19:20:08.313 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.WAL_size_limit_MB: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.manifest_preallocation_size: 4194304
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.is_fd_close_on_exec: 1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.advise_random_on_open: 1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.db_write_buffer_size: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.write_buffer_manager: 0x558cdb6e8500
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.access_hint_on_compaction_start: 1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.random_access_max_buffer_size: 1048576
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.use_adaptive_mutex: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.rate_limiter: (nil)
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.wal_recovery_mode: 2
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.enable_thread_tracking: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.enable_pipelined_write: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.unordered_write: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.allow_concurrent_memtable_write: 1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.write_thread_max_yield_usec: 100
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.write_thread_slow_yield_usec: 3
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.row_cache: None
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.wal_filter: None
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.avoid_flush_during_recovery: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.allow_ingest_behind: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.two_write_queues: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.manual_wal_flush: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.wal_compression: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.atomic_flush: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.persist_stats_to_disk: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.write_dbid_to_manifest: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.log_readahead_size: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.file_checksum_gen_factory: Unknown
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.best_efforts_recovery: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.allow_data_in_errors: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.db_host_id: __hostname__
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.enforce_single_del_contracts: true
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_background_jobs: 2
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_background_compactions: -1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_subcompactions: 1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.avoid_flush_during_shutdown: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.delayed_write_rate : 16777216
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_total_wal_size: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.stats_dump_period_sec: 600
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.stats_persist_period_sec: 600
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.stats_history_buffer_size: 1048576
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_open_files: -1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.bytes_per_sync: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.wal_bytes_per_sync: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.strict_bytes_per_sync: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_readahead_size: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Options.max_background_flushes: -1
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: Compression algorithms supported:
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.126+0000 7f7e96cc8d40 4 rocksdb: kZSTD supported: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kXpressCompression supported: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kBZip2Compression supported: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kZSTDNotFinalCompression supported: 0
2026-04-16T19:20:08.314 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kLZ4Compression supported: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kZlibCompression supported: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kLZ4HCCompression supported: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: kSnappyCompression supported: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Fast CRC32 supported: Supported on x86
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: DMutex implementation: pthread_mutex_t
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm01/store.db/MANIFEST-000010
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.merge_operator:
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_filter: None
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_filter_factory: None
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.sst_partitioner_factory: None
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.memtable_factory: SkipListFactory
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.table_factory: BlockBasedTable
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x558cdb6e4dc0)
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cache_index_and_filter_blocks: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cache_index_and_filter_blocks_with_high_priority: 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: pin_l0_filter_and_index_blocks_in_cache: 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: pin_top_level_index_and_filter: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: index_type: 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: data_block_index_type: 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: index_shortening: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: data_block_hash_table_util_ratio: 0.750000
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: checksum: 4
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: no_block_cache: 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_cache: 0x558cdb6db8d0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_cache_name: BinnedLRUCache
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_cache_options:
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: capacity : 536870912
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: num_shard_bits : 4
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: strict_capacity_limit : 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: high_pri_pool_ratio: 0.000
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_cache_compressed: (nil)
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: persistent_cache: (nil)
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_size: 4096
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_size_deviation: 10
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_restart_interval: 16
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: index_block_restart_interval: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: metadata_block_size: 4096
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: partition_filters: 0
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: use_delta_encoding: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: filter_policy: bloomfilter
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: whole_key_filtering: 1
2026-04-16T19:20:08.315 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: verify_compression: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: read_amp_bytes_per_bit: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: format_version: 5
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: enable_index_compression: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: block_align: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: max_auto_readahead_size: 262144
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: prepopulate_block_cache: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: initial_auto_readahead_size: 8192
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: num_file_reads_for_auto_readahead: 2
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.write_buffer_size: 33554432
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.max_write_buffer_number: 2
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.compression: NoCompression
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression: Disabled
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.prefix_extractor: nullptr
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.num_levels: 7
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.130+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.level: 32767
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.enabled: false
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.window_bits: -14
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.level: 32767
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.strategy: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.parallel_threads: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.enabled: false
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.level0_stop_writes_trigger: 36
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.target_file_size_base: 67108864
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.target_file_size_multiplier: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_base: 268435456
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_compaction_bytes: 1677721600
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.arena_block_size: 1048576
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.disable_auto_compactions: 0
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug
2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-04-16T19:20:08.316 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.inplace_update_support: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.inplace_update_num_locks: 10000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.memtable_whole_key_filtering: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.memtable_huge_page_size: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.bloom_locality: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.max_successive_merges: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.optimize_filters_for_hits: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.paranoid_file_checks: 0 2026-04-16T19:20:08.317 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.force_consistency_checks: 1 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.report_bg_io_stats: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.ttl: 2592000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.periodic_compaction_seconds: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.preclude_last_level_data_seconds: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.preserve_internal_time_seconds: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.enable_blob_files: false 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.min_blob_size: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.blob_file_size: 268435456 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.blob_compression_type: NoCompression 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.enable_blob_garbage_collection: false 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.blob_compaction_readahead_size: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.blob_file_starting_level: 0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm01/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 
0,max_column_family is 0,min_log_number_to_keep is 5 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4dd56a05-2869-47e0-9acb-29a00f15c32a 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776367208136741, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.134+0000 7f7e96cc8d40 4 rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.138+0000 7f7e96cc8d40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776367208140623, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 73352, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 227, "table_properties": {"data_size": 71638, "index_size": 167, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9666, "raw_average_key_size": 48, "raw_value_size": 66145, "raw_average_value_size": 334, "num_data_blocks": 8, "num_entries": 198, "num_filter_entries": 198, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776367208, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4dd56a05-2869-47e0-9acb-29a00f15c32a", "db_session_id": "006HW1SPGN22EDNE11JY", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.138+0000 7f7e96cc8d40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776367208140690, "job": 1, "event": "recovery_finished"} 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.138+0000 7f7e96cc8d40 4 rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 4 rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm01/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 
2026-04-16T19:20:08.142+0000 7f7e96cc8d40 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x558cdb706e00 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 4 rocksdb: DB pointer 0x558cdb856000 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e8ca6e640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e8ca6e640 4 rocksdb: [db/db_impl/db_impl.cc:1111] 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: ** DB Stats ** 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Uptime(secs): 0.0 total, 0.0 interval 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: ** Compaction Stats [default] ** 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: L0 2/0 73.51 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 38.6 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Sum 2/0 73.51 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 38.6 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 38.6 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-04-16T19:20:08.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: ** Compaction Stats [default] ** 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: 
Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 38.6 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Uptime(secs): 0.0 total, 0.0 interval 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Flush(GB): cumulative 0.000, interval 0.000 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: AddFile(GB): cumulative 0.000, interval 0.000 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: AddFile(Total Files): cumulative 0, interval 0 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: AddFile(L0 Files): cumulative 0, interval 0 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: AddFile(Keys): cumulative 0, interval 0 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Cumulative compaction: 0.00 GB write, 5.58 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Interval compaction: 0.00 GB write, 5.58 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Block cache BinnedLRUCache@0x558cdb6db8d0#7 capacity: 512.00 MB usage: 1.06 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 3e-05 secs_since: 0 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: Block cache entry stats(count,size,portion): FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%) 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: ** File Read Latency Histogram By Level [default] ** 2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 starting mon.vm01 rank 0 at public addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] at bind addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon_data /var/lib/ceph/mon/ceph-vm01 fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 2026-04-16T19:20:08.318 
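The journal excerpt above is the mon's complete RocksDB startup dump (block-cache settings, Options.*, recovery events, and the first stats dump). When verifying that a tuning override actually reached the daemon, it can help to extract those records into a dict and diff them between runs. A minimal sketch, not part of teuthology; it assumes only the "rocksdb: Options.<name>: <value>" record shape visible above (the un-prefixed block-table lines such as "block_size: 4096" are intentionally ignored):

#!/usr/bin/env python3
"""Extract RocksDB Options.* records from a mon journal excerpt on stdin."""
import re
import sys

# Matches e.g. "rocksdb: Options.write_buffer_size: 33554432" and
# "rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1".
OPT_RE = re.compile(r"rocksdb: Options\.([A-Za-z0-9_.\[\]]+): (.*)$")

def rocksdb_options(log_lines):
    """Return {option_name: value} for every Options.* record seen."""
    opts = {}
    for line in log_lines:
        m = OPT_RE.search(line)
        if m:
            opts[m.group(1)] = m.group(2).strip()
    return opts

if __name__ == "__main__":
    opts = rocksdb_options(sys.stdin)
    # For example, confirm this mon really runs without compression:
    print("compression =", opts.get("compression"))
    print("write_buffer_size =", opts.get("write_buffer_size"))

Fed the dump above, this would report compression = NoCompression and write_buffer_size = 33554432; the regex and script name are illustrative, not an established tool.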
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 1 mon.vm01@-1(???) e1 preinit fsid 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 mon.vm01@-1(???).mds e1 new map
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 mon.vm01@-1(???).mds e1 print_map
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: e1
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: btime 2026-04-16T19:20:06:880574+0000
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: enable_multiple, ever_enabled_multiple: 1,1
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: legacy client fscid: -1
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]:
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: No filesystems configured
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 mon.vm01@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 mon.vm01@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 mon.vm01@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 0 mon.vm01@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: debug 2026-04-16T19:20:08.142+0000 7f7e96cc8d40 1 mon.vm01@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151103+0000 mon.vm01 (mon.0) 1 : cluster [INF] mon.vm01 is new leader, mons vm01 in quorum (ranks 0)
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151133+0000 mon.vm01 (mon.0) 2 : cluster [DBG] monmap epoch 1
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151139+0000 mon.vm01 (mon.0) 3 : cluster [DBG] fsid 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151142+0000 mon.vm01 (mon.0) 4 : cluster [DBG] last_changed 2026-04-16T19:20:05.536485+0000
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151145+0000 mon.vm01 (mon.0) 5 : cluster [DBG] created 2026-04-16T19:20:05.536485+0000
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151148+0000 mon.vm01 (mon.0) 6 : cluster [DBG] min_mon_release 20 (tentacle)
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151153+0000 mon.vm01 (mon.0) 7 : cluster [DBG] election_strategy: 1
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151156+0000 mon.vm01 (mon.0) 8 : cluster [DBG] 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151437+0000 mon.vm01 (mon.0) 9 : cluster [DBG] fsmap
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151448+0000 mon.vm01 (mon.0) 10 : cluster [DBG] osdmap e1: 0 total, 0 up, 0 in
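The cluster-log records above show the single bootstrap mon winning its own election and forming a one-member quorum ("mons vm01 in quorum (ranks 0)"). Asserting that state programmatically only needs the fields already present in the status JSON dumped further below. A minimal sketch, not teuthology code, assuming a configured `ceph` CLI on PATH:

#!/usr/bin/env python3
"""Check that at least one mon is in quorum, per `ceph status` JSON."""
import json
import subprocess

def mon_quorum():
    # `ceph status --format json` returns the same document the log
    # polls below, including quorum_names and election_epoch.
    out = subprocess.check_output(["ceph", "status", "--format", "json"])
    status = json.loads(out)
    return status.get("quorum_names", []), status.get("election_epoch", 0)

if __name__ == "__main__":
    names, epoch = mon_quorum()
    assert names, "no mon quorum yet"
    print("quorum: %s (election epoch %d)" % (names, epoch))

Against this cluster it would print quorum: ['vm01'] (election epoch 5), matching the dumps that follow.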
2026-04-16T19:20:08.318 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 bash[28222]: cluster 2026-04-16T19:20:08.151950+0000 mon.vm01 (mon.0) 11 : cluster [DBG] mgrmap e1: no daemons active
2026-04-16T19:20:08.377 INFO:teuthology.orchestra.run.vm01.stdout:Wrote config to /etc/ceph/ceph.conf
2026-04-16T19:20:08.378 INFO:teuthology.orchestra.run.vm01.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-04-16T19:20:08.378 INFO:teuthology.orchestra.run.vm01.stdout:Creating mgr...
2026-04-16T19:20:08.378 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:9283 ...
2026-04-16T19:20:08.378 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:8765 ...
2026-04-16T19:20:08.378 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:8443 ...
2026-04-16T19:20:08.574 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:20:08.580 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mgr.vm01.nwhpas
2026-04-16T19:20:08.580 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to reset failed state of unit ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mgr.vm01.nwhpas.service: Unit ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mgr.vm01.nwhpas.service not loaded.
2026-04-16T19:20:08.787 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6.target.wants/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mgr.vm01.nwhpas.service → /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service.
2026-04-16T19:20:08.796 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present
2026-04-16T19:20:08.796 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to enable service . firewalld.service is not available
2026-04-16T19:20:08.796 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present
2026-04-16T19:20:08.796 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-04-16T19:20:08.796 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr to start...
2026-04-16T19:20:08.796 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr...
2026-04-16T19:20:08.832 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:08 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:20:09.121 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3711bb6a-39c9-11f1-9688-8928648d55a6",
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": {
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {},
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": []
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ],
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01"
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ],
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 0,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": {
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle",
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-04-16T19:20:09.122 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-04-16T19:20:06:880574+0000",
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat",
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs"
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ],
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-04-16T19:20:06.881394+0000",
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-04-16T19:20:09.123 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (1/15)...
2026-04-16T19:20:09.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:09 vm01 bash[28222]: audit 2026-04-16T19:20:08.303121+0000 mon.vm01 (mon.0) 12 : audit [INF] from='client.? 192.168.123.101:0/758226330' entity='client.admin'
2026-04-16T19:20:09.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:09 vm01 bash[28222]: audit 2026-04-16T19:20:09.074398+0000 mon.vm01 (mon.0) 13 : audit [DBG] from='client.? 192.168.123.101:0/12319451' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-16T19:20:11.431 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3711bb6a-39c9-11f1-9688-8928648d55a6",
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": {
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {},
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": []
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ],
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01"
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ],
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 3,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": {
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle",
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout },
"pgmap": { 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:11.432 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-04-16T19:20:06:880574+0000", 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-04-16T19:20:06.881394+0000", 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-04-16T19:20:11.433 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (2/15)... 2026-04-16T19:20:11.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:11 vm01 bash[28222]: audit 2026-04-16T19:20:11.393397+0000 mon.vm01 (mon.0) 14 : audit [DBG] from='client.? 
192.168.123.101:0/2385515051' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:11.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:11 vm01 bash[28222]: audit 2026-04-16T19:20:11.393397+0000 mon.vm01 (mon.0) 14 : audit [DBG] from='client.? 192.168.123.101:0/2385515051' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3711bb6a-39c9-11f1-9688-8928648d55a6", 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01" 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-16T19:20:13.756 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 
"pgmap": { 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:13.757 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-04-16T19:20:06:880574+0000", 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-04-16T19:20:06.881394+0000", 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-04-16T19:20:13.758 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (3/15)... 2026-04-16T19:20:14.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:13 vm01 bash[28222]: audit 2026-04-16T19:20:13.700406+0000 mon.vm01 (mon.0) 15 : audit [DBG] from='client.? 
192.168.123.101:0/893245341' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:14.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:13 vm01 bash[28222]: audit 2026-04-16T19:20:13.700406+0000 mon.vm01 (mon.0) 15 : audit [DBG] from='client.? 192.168.123.101:0/893245341' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3711bb6a-39c9-11f1-9688-8928648d55a6", 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01" 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 7, 2026-04-16T19:20:16.088 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 
"pgmap": { 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-04-16T19:20:06:880574+0000", 2026-04-16T19:20:16.089 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-04-16T19:20:06.881394+0000", 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-04-16T19:20:16.090 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (4/15)... 2026-04-16T19:20:16.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:16 vm01 bash[28222]: audit 2026-04-16T19:20:16.024082+0000 mon.vm01 (mon.0) 16 : audit [DBG] from='client.? 
192.168.123.101:0/3903442717' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:16.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:16 vm01 bash[28222]: audit 2026-04-16T19:20:16.024082+0000 mon.vm01 (mon.0) 16 : audit [DBG] from='client.? 192.168.123.101:0/3903442717' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3711bb6a-39c9-11f1-9688-8928648d55a6", 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:18.437 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01" 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 10, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 
"pgmap": { 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-04-16T19:20:06:880574+0000", 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-04-16T19:20:06.881394+0000", 2026-04-16T19:20:18.438 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:18.439 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:18.439 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-16T19:20:18.439 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-04-16T19:20:18.439 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (5/15)... 2026-04-16T19:20:18.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:18 vm01 bash[28222]: audit 2026-04-16T19:20:18.364676+0000 mon.vm01 (mon.0) 17 : audit [DBG] from='client.? 
192.168.123.101:0/3084150526' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:18.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:18 vm01 bash[28222]: audit 2026-04-16T19:20:18.364676+0000 mon.vm01 (mon.0) 17 : audit [DBG] from='client.? 192.168.123.101:0/3084150526' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: cluster 2026-04-16T19:20:18.880488+0000 mon.vm01 (mon.0) 18 : cluster [INF] Activating manager daemon vm01.nwhpas 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: cluster 2026-04-16T19:20:18.880488+0000 mon.vm01 (mon.0) 18 : cluster [INF] Activating manager daemon vm01.nwhpas 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: cluster 2026-04-16T19:20:18.884616+0000 mon.vm01 (mon.0) 19 : cluster [DBG] mgrmap e2: vm01.nwhpas(active, starting, since 0.00423707s) 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: cluster 2026-04-16T19:20:18.884616+0000 mon.vm01 (mon.0) 19 : cluster [DBG] mgrmap e2: vm01.nwhpas(active, starting, since 0.00423707s) 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.886733+0000 mon.vm01 (mon.0) 20 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mds metadata"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.886733+0000 mon.vm01 (mon.0) 20 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mds metadata"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.886850+0000 mon.vm01 (mon.0) 21 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.886850+0000 mon.vm01 (mon.0) 21 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.886922+0000 mon.vm01 (mon.0) 22 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.886922+0000 mon.vm01 (mon.0) 22 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.887008+0000 mon.vm01 (mon.0) 23 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.887008+0000 mon.vm01 (mon.0) 23 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' 
entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.887748+0000 mon.vm01 (mon.0) 24 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm01.nwhpas", "id": "vm01.nwhpas"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.887748+0000 mon.vm01 (mon.0) 24 : audit [DBG] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm01.nwhpas", "id": "vm01.nwhpas"} : dispatch 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: cluster 2026-04-16T19:20:18.893285+0000 mon.vm01 (mon.0) 25 : cluster [INF] Manager daemon vm01.nwhpas is now available 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: cluster 2026-04-16T19:20:18.893285+0000 mon.vm01 (mon.0) 25 : cluster [INF] Manager daemon vm01.nwhpas is now available 2026-04-16T19:20:19.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.903968+0000 mon.vm01 (mon.0) 26 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/mirror_snapshot_schedule"} : dispatch 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.903968+0000 mon.vm01 (mon.0) 26 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/mirror_snapshot_schedule"} : dispatch 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.907709+0000 mon.vm01 (mon.0) 27 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.907709+0000 mon.vm01 (mon.0) 27 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.908028+0000 mon.vm01 (mon.0) 28 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/trash_purge_schedule"} : dispatch 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.908028+0000 mon.vm01 (mon.0) 28 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/trash_purge_schedule"} : dispatch 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.910044+0000 mon.vm01 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.910044+0000 mon.vm01 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' 
entity='mgr.vm01.nwhpas' 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.911898+0000 mon.vm01 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' 2026-04-16T19:20:19.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:19 vm01 bash[28222]: audit 2026-04-16T19:20:18.911898+0000 mon.vm01 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.101:0/3254694881' entity='mgr.vm01.nwhpas' 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3711bb6a-39c9-11f1-9688-8928648d55a6", 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "vm01" 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 12, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: 
stdout }, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-16T19:20:20.884 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-04-16T19:20:06:880574+0000", 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-04-16T19:20:06.881394+0000", 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-04-16T19:20:20.885 INFO:teuthology.orchestra.run.vm01.stdout:mgr is available 2026-04-16T19:20:21.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:20 vm01 bash[28222]: cluster 2026-04-16T19:20:19.889238+0000 mon.vm01 (mon.0) 31 : cluster [DBG] 
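The near-identical status dumps above are a single readiness probe in a retry loop: the bootstrap re-runs `ceph status --format json-pretty` until `mgrmap.available` flips to true. A minimal sketch of an equivalent poll in bash, assuming jq is installed and a 2-second retry interval (only the 15-try cap is taken from the counter in the messages above):

    # poll cluster status until the mgr map reports an available active mgr
    for i in $(seq 1 15); do
        if [ "$(ceph status --format json | jq -r '.mgrmap.available')" = "true" ]; then
            echo "mgr is available"
            break
        fi
        echo "mgr not available, waiting ($i/15)..."
        sleep 2
    done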
2026-04-16T19:20:21.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:20 vm01 bash[28222]: cluster 2026-04-16T19:20:19.889238+0000 mon.vm01 (mon.0) 31 : cluster [DBG] mgrmap e3: vm01.nwhpas(active, since 1.00886s)
2026-04-16T19:20:21.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:20 vm01 bash[28222]: audit 2026-04-16T19:20:20.839911+0000 mon.vm01 (mon.0) 32 : audit [DBG] from='client.? 192.168.123.101:0/762455546' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [global]
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout fsid = 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789]
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [mgr]
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [osd]
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-04-16T19:20:21.273 INFO:teuthology.orchestra.run.vm01.stdout:Enabling cephadm module...
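The [global]/[mgr]/[osd] dump above is the minimal bootstrap ceph.conf being folded into the mon's central configuration database via `config assimilate-conf` (the dispatch shows up in the audit entries just below). A sketch of the equivalent manual step, assuming the file lives at /etc/ceph/ceph.conf:

    # merge a flat ceph.conf into the centralized config store,
    # then verify the assimilated options landed
    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    ceph config dump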
2026-04-16T19:20:22.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:21 vm01 bash[28222]: cluster 2026-04-16T19:20:20.898989+0000 mon.vm01 (mon.0) 33 : cluster [DBG] mgrmap e4: vm01.nwhpas(active, since 2s)
2026-04-16T19:20:22.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:21 vm01 bash[28222]: audit 2026-04-16T19:20:21.229189+0000 mon.vm01 (mon.0) 34 : audit [INF] from='client.? 192.168.123.101:0/3966323533' entity='client.admin' cmd={"prefix": "config assimilate-conf"} : dispatch
2026-04-16T19:20:22.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:21 vm01 bash[28222]: audit 2026-04-16T19:20:21.663189+0000 mon.vm01 (mon.0) 35 : audit [INF] from='client.? 192.168.123.101:0/3281324565' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "cephadm"} : dispatch
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 5,
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true,
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "active_name": "vm01.nwhpas",
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for the mgr to restart...
2026-04-16T19:20:22.393 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr epoch 5...
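Enabling a mgr module makes the active mgr respawn, so the bootstrap records the current mgrmap epoch (5 here, from the `ceph mgr stat` output above) and waits for a newer map to appear. A rough bash equivalent of that wait, assuming jq and a 2-second poll interval:

    # capture the pre-restart mgrmap epoch, enable the module,
    # then wait for the epoch to advance past the captured value
    before=$(ceph mgr stat | jq -r '.epoch')
    ceph mgr module enable cephadm
    until [ "$(ceph mgr stat | jq -r '.epoch')" -gt "$before" ]; do
        sleep 2
    done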
2026-04-16T19:20:23.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:22 vm01 bash[28222]: audit 2026-04-16T19:20:21.899856+0000 mon.vm01 (mon.0) 36 : audit [INF] from='client.? 192.168.123.101:0/3281324565' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-04-16T19:20:23.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:22 vm01 bash[28222]: cluster 2026-04-16T19:20:21.901718+0000 mon.vm01 (mon.0) 37 : cluster [DBG] mgrmap e5: vm01.nwhpas(active, since 3s)
2026-04-16T19:20:23.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:22 vm01 bash[28222]: audit 2026-04-16T19:20:22.325769+0000 mon.vm01 (mon.0) 38 : audit [DBG] from='client.? 192.168.123.101:0/2429258664' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: cluster 2026-04-16T19:20:31.772728+0000 mon.vm01 (mon.0) 39 : cluster [INF] Active manager daemon vm01.nwhpas restarted
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: cluster 2026-04-16T19:20:31.772984+0000 mon.vm01 (mon.0) 40 : cluster [INF] Activating manager daemon vm01.nwhpas
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: cluster 2026-04-16T19:20:31.777375+0000 mon.vm01 (mon.0) 41 : cluster [DBG] osdmap e2: 0 total, 0 up, 0 in
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: cluster 2026-04-16T19:20:31.777437+0000 mon.vm01 (mon.0) 42 : cluster [DBG] mgrmap e6: vm01.nwhpas(active, starting, since 0.00457308s)
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: audit 2026-04-16T19:20:31.778462+0000 mon.vm01 (mon.0) 43 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: audit 2026-04-16T19:20:31.778997+0000 mon.vm01 (mon.0) 44 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm01.nwhpas", "id": "vm01.nwhpas"} : dispatch
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: audit 2026-04-16T19:20:31.779760+0000 mon.vm01 (mon.0) 45 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "mds metadata"} : dispatch
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: audit 2026-04-16T19:20:31.779826+0000 mon.vm01 (mon.0) 46 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata"} : dispatch
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: audit 2026-04-16T19:20:31.779978+0000 mon.vm01 (mon.0) 47 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata"} : dispatch
2026-04-16T19:20:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:31 vm01 bash[28222]: cluster 2026-04-16T19:20:31.784217+0000 mon.vm01 (mon.0) 48 : cluster [INF] Manager daemon vm01.nwhpas is now available
2026-04-16T19:20:32.857 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-04-16T19:20:32.857 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7,
2026-04-16T19:20:32.857 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "initialized": true
2026-04-16T19:20:32.857 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-04-16T19:20:32.857 INFO:teuthology.orchestra.run.vm01.stdout:mgr epoch 5 is available
2026-04-16T19:20:32.857 INFO:teuthology.orchestra.run.vm01.stdout:Verifying orchestrator module is enabled...
2026-04-16T19:20:33.826 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stderr module 'orchestrator' is already enabled (always-on)
2026-04-16T19:20:33.826 INFO:teuthology.orchestra.run.vm01.stdout:Setting orchestrator backend to cephadm...
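With the mgr back up, the orchestrator is pointed at the cephadm module. The CLI sequence, as dispatched above, is the standard one (the orchestrator module-enable is a no-op here since, as the stderr line notes, it is always-on):

    ceph mgr module enable cephadm
    ceph orch set backend cephadm
    ceph orch status    # confirm the backend took effect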
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.589238+0000 mon.vm01 (mon.0) 49 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.592291+0000 mon.vm01 (mon.0) 50 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: cephadm 2026-04-16T19:20:32.592737+0000 mgr.vm01.nwhpas (mgr.14124) 1 : cephadm [INF] Found migration_current of "None". Setting to last migration.
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.594378+0000 mon.vm01 (mon.0) 51 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.646831+0000 mon.vm01 (mon.0) 52 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.650388+0000 mon.vm01 (mon.0) 53 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.655726+0000 mon.vm01 (mon.0) 54 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.662291+0000 mon.vm01 (mon.0) 55 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/mirror_snapshot_schedule"} : dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.664285+0000 mon.vm01 (mon.0) 56 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/trash_purge_schedule"} : dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: cluster 2026-04-16T19:20:32.781405+0000 mon.vm01 (mon.0) 57 : cluster [DBG] mgrmap e7: vm01.nwhpas(active, since 1.00853s)
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.781687+0000 mgr.vm01.nwhpas (mgr.14124) 2 : audit [DBG] from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:32.786033+0000 mgr.vm01.nwhpas (mgr.14124) 3 : audit [DBG] from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:33.264559+0000 mon.vm01 (mon.0) 58 : audit [INF] from='client.? 192.168.123.101:0/44416258' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "orchestrator"} : dispatch
2026-04-16T19:20:33.845 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:33 vm01 bash[28222]: audit 2026-04-16T19:20:33.381325+0000 mon.vm01 (mon.0) 59 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:20:34.650 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout value unchanged
2026-04-16T19:20:34.650 INFO:teuthology.orchestra.run.vm01.stdout:Generating ssh key...
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: cephadm 2026-04-16T19:20:33.168783+0000 mgr.vm01.nwhpas (mgr.14124) 4 : cephadm [INF] [16/Apr/2026:19:20:33] ENGINE Bus STARTING
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: cephadm 2026-04-16T19:20:33.279484+0000 mgr.vm01.nwhpas (mgr.14124) 5 : cephadm [INF] [16/Apr/2026:19:20:33] ENGINE Serving on https://192.168.123.101:7150
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: cephadm 2026-04-16T19:20:33.280310+0000 mgr.vm01.nwhpas (mgr.14124) 6 : cephadm [INF] [16/Apr/2026:19:20:33] ENGINE Client ('192.168.123.101', 49708) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: cephadm 2026-04-16T19:20:33.380640+0000 mgr.vm01.nwhpas (mgr.14124) 7 : cephadm [INF] [16/Apr/2026:19:20:33] ENGINE Serving on http://192.168.123.101:8765
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: cephadm 2026-04-16T19:20:33.380703+0000 mgr.vm01.nwhpas (mgr.14124) 8 : cephadm [INF] [16/Apr/2026:19:20:33] ENGINE Bus STARTED
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: audit 2026-04-16T19:20:33.781646+0000 mon.vm01 (mon.0) 60 : audit [INF] from='client.? 192.168.123.101:0/44416258' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "orchestrator"}]': finished
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: cluster 2026-04-16T19:20:33.784138+0000 mon.vm01 (mon.0) 61 : cluster [DBG] mgrmap e8: vm01.nwhpas(active, since 2s)
2026-04-16T19:20:34.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: audit 2026-04-16T19:20:34.187012+0000 mon.vm01 (mon.0) 62 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:34.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:34 vm01 bash[28222]: audit 2026-04-16T19:20:34.192977+0000 mon.vm01 (mon.0) 63 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:20:35.669 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkBQTQO169TZqfttYYufnyFLyqPwEq4wjbDZ7THNt61GVTMjGHb7iEqVBuanqz2bvmgyU5Ki0+4kkp9KZ/JQh7t9tsOghZOwhSKXOwPRdyg7rH/Iq26nXa3vQrI8GMCfNVh0iRDxWeHuvH+4hiOtTtT9Zj+7ZrEfGD11CHW1WeuDg4W4QjPWTgM8Qhg5081nS4EfATk/e3gibraErvm/FS5nZZa2FyQ3KtzxWtJNQaKT8+eBF3ltRWhcBrYzgWhq+lBRlCNvI1UoqlaQxSlMalUJ2ooaCOk5wwCIeyJZGv0y3IgANZpQzD7h3n/4ufVNzpQYsNCXh5VhCU3AiHxYNUkXpTdB+2Ceno8pN5lyPb1UGnNmuCabt6/h/VqKn/hhb/adcxt3S4q/bPX5dLGH85Wzd0ZWwFdFR5yxHP8E0RuU2vlLnOzT6FNrytZzecnADYAnkJ4lRzH7IgHnhS/l/ifIT3B9RkERtAjWnkuE0O3nxg1Iappa4VCjK99xWcZzk= ceph-3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:35.669 INFO:teuthology.orchestra.run.vm01.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-04-16T19:20:35.669 INFO:teuthology.orchestra.run.vm01.stdout:Adding key to root@localhost authorized_keys...
2026-04-16T19:20:35.669 INFO:teuthology.orchestra.run.vm01.stdout:Adding host vm01...
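The ssh-rsa key printed above is the cluster key cephadm uses to reach managed hosts: the bootstrap writes it to ceph.pub, installs it for root@localhost, and then registers the first host. The same flow by hand, assuming root SSH access to the target host:

    ceph cephadm generate-key
    ceph cephadm get-pub-key > ceph.pub
    ssh-copy-id -f -i ceph.pub root@vm01
    ceph orch host add vm01 192.168.123.101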
2026-04-16T19:20:35.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:35 vm01 bash[28222]: audit 2026-04-16T19:20:34.183083+0000 mgr.vm01.nwhpas (mgr.14124) 9 : audit [DBG] from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:35.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:35 vm01 bash[28222]: audit 2026-04-16T19:20:34.602783+0000 mgr.vm01.nwhpas (mgr.14124) 10 : audit [DBG] from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:35.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:35 vm01 bash[28222]: audit 2026-04-16T19:20:35.214450+0000 mon.vm01 (mon.0) 64 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:35.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:35 vm01 bash[28222]: audit 2026-04-16T19:20:35.216959+0000 mon.vm01 (mon.0) 65 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:36.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:36 vm01 bash[28222]: audit 2026-04-16T19:20:35.015909+0000 mgr.vm01.nwhpas (mgr.14124) 11 : audit [DBG] from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:36.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:36 vm01 bash[28222]: cephadm 2026-04-16T19:20:35.016108+0000 mgr.vm01.nwhpas (mgr.14124) 12 : cephadm [INF] Generating ssh key...
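The `cephadm set-user` dispatch above selects the account the orchestrator logs in as on managed hosts; root is the conventional choice here, and a non-root user would additionally need passwordless sudo on every host:

    # make cephadm connect as root (the cluster key must then be in
    # root's authorized_keys on every managed host)
    ceph cephadm set-user root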
2026-04-16T19:20:36.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:36 vm01 bash[28222]: audit 2026-04-16T19:20:35.624738+0000 mgr.vm01.nwhpas (mgr.14124) 13 : audit [DBG] from='client.14144 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:37.788 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:37 vm01 bash[28222]: audit 2026-04-16T19:20:36.045838+0000 mgr.vm01.nwhpas (mgr.14124) 14 : audit [DBG] from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm01", "addr": "192.168.123.101", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:37.788 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:37 vm01 bash[28222]: cephadm 2026-04-16T19:20:36.944823+0000 mgr.vm01.nwhpas (mgr.14124) 15 : cephadm [INF] Deploying cephadm binary to vm01
2026-04-16T19:20:38.979 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Added host 'vm01' with addr '192.168.123.101'
2026-04-16T19:20:38.980 INFO:teuthology.orchestra.run.vm01.stdout:Deploying mon service with default placement...
2026-04-16T19:20:39.414 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-04-16T19:20:39.414 INFO:teuthology.orchestra.run.vm01.stdout:Deploying mgr service with default placement...
2026-04-16T19:20:39.875 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-04-16T19:20:39.875 INFO:teuthology.orchestra.run.vm01.stdout:Deploying crash service with default placement...
2026-04-16T19:20:39.955 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:39 vm01 bash[28222]: audit 2026-04-16T19:20:38.928029+0000 mon.vm01 (mon.0) 66 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:39.955 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:39 vm01 bash[28222]: cephadm 2026-04-16T19:20:38.928374+0000 mgr.vm01.nwhpas (mgr.14124) 16 : cephadm [INF] Added host vm01
2026-04-16T19:20:39.955 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:39 vm01 bash[28222]: audit 2026-04-16T19:20:38.928711+0000 mon.vm01 (mon.0) 67 : audit [DBG] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:20:39.955 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:39 vm01 bash[28222]: audit 2026-04-16T19:20:39.367649+0000 mon.vm01 (mon.0) 68 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:39.955 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:39 vm01 bash[28222]: audit 2026-04-16T19:20:39.800103+0000 mon.vm01 (mon.0) 69 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:40.335 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled crash update...
2026-04-16T19:20:40.335 INFO:teuthology.orchestra.run.vm01.stdout:Deploying ceph-exporter service with default placement...
2026-04-16T19:20:40.766 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update...
2026-04-16T19:20:40.766 INFO:teuthology.orchestra.run.vm01.stdout:Deploying prometheus service with default placement...
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: audit 2026-04-16T19:20:39.363418+0000 mgr.vm01.nwhpas (mgr.14124) 17 : audit [DBG] from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: cephadm 2026-04-16T19:20:39.364624+0000 mgr.vm01.nwhpas (mgr.14124) 18 : cephadm [INF] Saving service mon spec with placement count:5
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: audit 2026-04-16T19:20:39.796307+0000 mgr.vm01.nwhpas (mgr.14124) 19 : audit [DBG] from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: cephadm 2026-04-16T19:20:39.796994+0000 mgr.vm01.nwhpas (mgr.14124) 20 : cephadm [INF] Saving service mgr spec with placement count:2
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: audit 2026-04-16T19:20:39.990649+0000 mon.vm01 (mon.0) 70 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: audit 2026-04-16T19:20:40.280916+0000 mon.vm01 (mon.0) 71 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: audit 2026-04-16T19:20:40.326303+0000 mon.vm01 (mon.0) 72 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:41.153 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:40 vm01 bash[28222]: audit 2026-04-16T19:20:40.709037+0000 mon.vm01 (mon.0) 73 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:41.190 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled prometheus update...
2026-04-16T19:20:41.190 INFO:teuthology.orchestra.run.vm01.stdout:Deploying grafana service with default placement...
2026-04-16T19:20:41.634 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled grafana update...
2026-04-16T19:20:41.634 INFO:teuthology.orchestra.run.vm01.stdout:Deploying node-exporter service with default placement...
2026-04-16T19:20:42.044 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update...
2026-04-16T19:20:42.044 INFO:teuthology.orchestra.run.vm01.stdout:Deploying alertmanager service with default placement...
2026-04-16T19:20:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:41 vm01 bash[28222]: audit 2026-04-16T19:20:40.273800+0000 mgr.vm01.nwhpas (mgr.14124) 21 : audit [DBG] from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:41 vm01 bash[28222]: cephadm 2026-04-16T19:20:40.274493+0000 mgr.vm01.nwhpas (mgr.14124) 22 : cephadm [INF] Saving service crash spec with placement *
2026-04-16T19:20:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:41 vm01 bash[28222]: audit 2026-04-16T19:20:40.704913+0000 mgr.vm01.nwhpas (mgr.14124) 23 : audit [DBG] from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:41 vm01 bash[28222]: cephadm 2026-04-16T19:20:40.705881+0000 mgr.vm01.nwhpas (mgr.14124) 24 : cephadm [INF] Saving service ceph-exporter spec with placement *
2026-04-16T19:20:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:41 vm01 bash[28222]: audit 2026-04-16T19:20:41.140462+0000 mon.vm01 (mon.0) 74 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:41 vm01 bash[28222]: audit 2026-04-16T19:20:41.564384+0000 mon.vm01 (mon.0) 75 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:42.479 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update...
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: audit 2026-04-16T19:20:41.136708+0000 mgr.vm01.nwhpas (mgr.14124) 25 : audit [DBG] from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: cephadm 2026-04-16T19:20:41.137408+0000 mgr.vm01.nwhpas (mgr.14124) 26 : cephadm [INF] Saving service prometheus spec with placement count:1
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: audit 2026-04-16T19:20:41.560279+0000 mgr.vm01.nwhpas (mgr.14124) 27 : audit [DBG] from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: cephadm 2026-04-16T19:20:41.561222+0000 mgr.vm01.nwhpas (mgr.14124) 28 : cephadm [INF] Saving service grafana spec with placement count:1
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: audit 2026-04-16T19:20:41.999750+0000 mon.vm01 (mon.0) 76 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: audit 2026-04-16T19:20:42.407011+0000 mon.vm01 (mon.0) 77 : audit [INF] from='mgr.14124 192.168.123.101:0/574841374' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:42 vm01 bash[28222]: audit 2026-04-16T19:20:42.841779+0000 mon.vm01 (mon.0) 78 : audit [INF] from='client.? 192.168.123.101:0/270762634' entity='client.admin'
2026-04-16T19:20:43.321 INFO:teuthology.orchestra.run.vm01.stdout:Enabling the dashboard module...
2026-04-16T19:20:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:43 vm01 bash[28222]: audit 2026-04-16T19:20:41.996320+0000 mgr.vm01.nwhpas (mgr.14124) 29 : audit [DBG] from='client.14160 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:43 vm01 bash[28222]: cephadm 2026-04-16T19:20:41.997013+0000 mgr.vm01.nwhpas (mgr.14124) 30 : cephadm [INF] Saving service node-exporter spec with placement *
2026-04-16T19:20:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:43 vm01 bash[28222]: audit 2026-04-16T19:20:42.403135+0000 mgr.vm01.nwhpas (mgr.14124) 31 : audit [DBG] from='client.14162 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:43 vm01 bash[28222]: cephadm 2026-04-16T19:20:42.403839+0000 mgr.vm01.nwhpas (mgr.14124) 32 : cephadm [INF] Saving service alertmanager spec with placement count:1
2026-04-16T19:20:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:43 vm01 bash[28222]: audit 2026-04-16T19:20:43.271571+0000 mon.vm01 (mon.0) 79 : audit [INF] from='client.? 192.168.123.101:0/2127966358' entity='client.admin'
2026-04-16T19:20:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:43 vm01 bash[28222]: audit 2026-04-16T19:20:43.732615+0000 mon.vm01 (mon.0) 80 : audit [INF] from='client.? 192.168.123.101:0/1308951158' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "dashboard"} : dispatch
2026-04-16T19:20:44.822 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-04-16T19:20:44.822 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 9,
2026-04-16T19:20:44.822 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true,
2026-04-16T19:20:44.822 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "active_name": "vm01.nwhpas",
2026-04-16T19:20:44.822 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-04-16T19:20:44.823 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-04-16T19:20:44.823 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for the mgr to restart...
2026-04-16T19:20:44.823 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr epoch 9...
2026-04-16T19:20:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:45 vm01 bash[28222]: audit 2026-04-16T19:20:44.273102+0000 mon.vm01 (mon.0) 81 : audit [INF] from='client.? 192.168.123.101:0/1308951158' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-04-16T19:20:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:45 vm01 bash[28222]: cluster 2026-04-16T19:20:44.275170+0000 mon.vm01 (mon.0) 82 : cluster [DBG] mgrmap e9: vm01.nwhpas(active, since 12s)
2026-04-16T19:20:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:45 vm01 bash[28222]: audit 2026-04-16T19:20:44.751959+0000 mon.vm01 (mon.0) 83 : audit [DBG] from='client.? 192.168.123.101:0/2428229706' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: cluster 2026-04-16T19:20:54.810722+0000 mon.vm01 (mon.0) 84 : cluster [INF] Active manager daemon vm01.nwhpas restarted
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: cluster 2026-04-16T19:20:54.810949+0000 mon.vm01 (mon.0) 85 : cluster [INF] Activating manager daemon vm01.nwhpas
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: cluster 2026-04-16T19:20:54.815024+0000 mon.vm01 (mon.0) 86 : cluster [DBG] osdmap e3: 0 total, 0 up, 0 in
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: cluster 2026-04-16T19:20:54.815095+0000 mon.vm01 (mon.0) 87 : cluster [DBG] mgrmap e10: vm01.nwhpas(active, starting, since 0.00424771s)
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: audit 2026-04-16T19:20:54.815831+0000 mon.vm01 (mon.0) 88 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: audit 2026-04-16T19:20:54.816665+0000 mon.vm01 (mon.0) 89 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm01.nwhpas", "id": "vm01.nwhpas"} : dispatch
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: audit 2026-04-16T19:20:54.817102+0000 mon.vm01 (mon.0) 90 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "mds metadata"} : dispatch
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: audit 2026-04-16T19:20:54.817168+0000 mon.vm01 (mon.0) 91 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata"} : dispatch
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: audit 2026-04-16T19:20:54.817225+0000 mon.vm01 (mon.0) 92 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata"} : dispatch
2026-04-16T19:20:55.186 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:54 vm01 bash[28222]: cluster 2026-04-16T19:20:54.821164+0000 mon.vm01 (mon.0) 93 : cluster [INF] Manager daemon vm01.nwhpas is now available
2026-04-16T19:20:55.897 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {
2026-04-16T19:20:55.897 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11,
2026-04-16T19:20:55.897 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "initialized": true
2026-04-16T19:20:55.897 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }
2026-04-16T19:20:55.897 INFO:teuthology.orchestra.run.vm01.stdout:mgr epoch 9 is available
2026-04-16T19:20:55.897 INFO:teuthology.orchestra.run.vm01.stdout:Using certmgr to generate dashboard self-signed certificate...
2026-04-16T19:20:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:55 vm01 bash[28222]: audit 2026-04-16T19:20:55.084398+0000 mon.vm01 (mon.0) 94 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:20:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:55 vm01 bash[28222]: audit 2026-04-16T19:20:55.100360+0000 mon.vm01 (mon.0) 95 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/mirror_snapshot_schedule"} : dispatch
2026-04-16T19:20:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:55 vm01 bash[28222]: audit 2026-04-16T19:20:55.104150+0000 mon.vm01 (mon.0) 96 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/trash_purge_schedule"} : dispatch
2026-04-16T19:20:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:55 vm01 bash[28222]: cluster 2026-04-16T19:20:55.820059+0000 mon.vm01 (mon.0) 97 : cluster [DBG] mgrmap e11: vm01.nwhpas(active, since 1.00921s)
2026-04-16T19:20:56.806 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {"cert": "-----BEGIN CERTIFICATE-----\nMIIE+TCCAuGgAwIBAgIUdVT7nAQbSL/NhrxRuQDCfLH5hlswDQYJKoZIhvcNAQEL\nBQAwFzEVMBMGA1UEAwwMY2VwaGFkbS1yb290MB4XDTI2MDQxNjE5MjA1NloXDTI5\nMDQxNTE5MjA1NlowGjEYMBYGA1UEAwwPMTkyLjE2OC4xMjMuMTAxMIICIjANBgkq\nhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtmGujgAyoSldKBkg9NLChfZaUaPZArUT\ne2GiLXiZotoOE02qdE80yZM7VX6v+9IC/HRfMWqyOICPU7a+ITFSpG8qTSiqevpT\naK3anRTQCjGarGgMXsY4QhxYzycaSJ0qQRGqrHTCNJxYbIiODxbPBcxfrKn7LRaV\nPdgYoITFJxw/R6LOspSoX4dMU37h4b8kNbTT+ETCnaFAu2+73Od+Uyt+Eu7HIddO\nzK2umIWXa72vrQU9R1oDBIg5G8K7U0qqxUXPEonxs8TW+GCaRoCqfZUq17WmQ/hR\nnEpv4wpoyRhkjOA1mZulY64mMv2GAIOimq9TSwd/tj5BLWnA4TAhKEgsFfsqWPsx\nJKS5H0kzcH5yaJvOat07ftLN8AGArP4Jha6LcrySUatnx+EwurmzUe1Y/UBTopbt\nqGfU0eqtshIygaoWrt1tO5Jdk9EuaxVlbxl+r2o5QdQNJpfq3sHIea20cgTXbflH\nva43o55Sn1lq3ePZ+GmMVGPgezUh9OzBOLCVZBQ99IYeHM9AYDAG6491Fkoc3c9S\n0n4OmziOOI/yVlYzDQDzSWf0JE4ctALLs2fmyJ9V5P5vtK1PDfaYr5MQQh+8Onu1\njlE9q5W2F8oE9PzZiP+dYv3n6nPrhdmNUprAA+tCrmeHpZOs/kjrjAW7Z8JjAa+R\nf/yjFdPB3s8CAwEAAaM6MDgwKAYDVR0RBCEwH4IEdm0wMYIRZGFzaGJvYXJkX3Nl\ncnZlcnOHBMCoe2UwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAgEALzE4\nFfZgxMVdviAQ5v4uNneu1lLTB0BjXVHsqLvRjG8v0u3BLQdB0f7GB9Cl1BuLzq2+\nzRyuaas3tYGb605/Bxa99FqVSkag7C9o1T2oaCqqkJ7vd0ymEJsFQOzw3OaXnoQk\n3P9aaEfrGR1bTFPg6PsrmV4fIFI5g7/DIY65q65ojBYU6XXAcN1hI8StcmD4Hb6P\n8Wq7R1X9M6zBv+Vi30jcq2YYRolH7YGQCJcAsqZM70ovTJByNEDDVLqVVkqa1b16\nALphQcpku6YJUznMP8iianzyIDXAMxSsHGEZLTwzReL2wHIh+TiCQniE5KTXR1lN\nAoZvTckSKZDDykylbAPWcE2w/D7rxz1WJ5V0x6q15v4ugAxHSRo9aDZHxmrxKoGw\n1B22ge+9ixfraGpjezT23E2LKYMz7T35fA8Z7rliYjHrOJrHdbdQhbj+To2HM3Xm\nSJABP01srByVUysYr77WEJDppzpDarmTJ7QCqKW85HN32qLSnNqUxqoNOe1xm/Nl\n2lhYOBCgSp9VIbTj5Sbds2vt2jR0X/mNAWHjr6cxXUh4YfIihwvsrICJ2/ucLhTt\nEdAJm+bGI+jpPPs16ACtU6wuq4HD4uHjKa4tbWy+hg1nTJVh02NSWMKovGDMMVCS\nNuQt85+op94uCCDzKMk9h5gptnIb8+3gOq9MgMs=\n-----END CERTIFICATE-----\n", "key": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKgIBAAKCAgEAtmGujgAyoSldKBkg9NLChfZaUaPZArUTe2GiLXiZotoOE02q\ndE80yZM7VX6v+9IC/HRfMWqyOICPU7a+ITFSpG8qTSiqevpTaK3anRTQCjGarGgM\nXsY4QhxYzycaSJ0qQRGqrHTCNJxYbIiODxbPBcxfrKn7LRaVPdgYoITFJxw/R6LO\nspSoX4dMU37h4b8kNbTT+ETCnaFAu2+73Od+Uyt+Eu7HIddOzK2umIWXa72vrQU9\nR1oDBIg5G8K7U0qqxUXPEonxs8TW+GCaRoCqfZUq17WmQ/hRnEpv4wpoyRhkjOA1\nmZulY64mMv2GAIOimq9TSwd/tj5BLWnA4TAhKEgsFfsqWPsxJKS5H0kzcH5yaJvO\nat07ftLN8AGArP4Jha6LcrySUatnx+EwurmzUe1Y/UBTopbtqGfU0eqtshIygaoW\nrt1tO5Jdk9EuaxVlbxl+r2o5QdQNJpfq3sHIea20cgTXbflHva43o55Sn1lq3ePZ\n+GmMVGPgezUh9OzBOLCVZBQ99IYeHM9AYDAG6491Fkoc3c9S0n4OmziOOI/yVlYz\nDQDzSWf0JE4ctALLs2fmyJ9V5P5vtK1PDfaYr5MQQh+8Onu1jlE9q5W2F8oE9PzZ\niP+dYv3n6nPrhdmNUprAA+tCrmeHpZOs/kjrjAW7Z8JjAa+Rf/yjFdPB3s8CAwEA\nAQKCAgA4VRqX1LrOqZqTAJP72r9npkyGa+/jFLAES4dOuTHnKafwUWxlSXO6EUZh\n5WQRCXCPrpgaiOSsbBUZG9hFI1lt3cI4m+dwXbJ2okBkhWGuzFyEhQzRFzOYQ+4w\nadkF9blv6LeJVfUIUARF3bXXNknSdipVCBnDdva45+7PYcMNjf2DsGdcpGsPOpia\nL5YwZpwWFJjmVcKw8nYujtf9ZulPYh8qVONf8gNuV/UWOiknh6rj67reN5C4DSVq\njC7VDrqJfUL07HOh/ZoSlLACN8qxSGo3v20wEFqQxjMh8vyjpGumUbhsjjU7ATbe\nSA1kSkRmt+GnypSpYzg5gTEovPd4Vivc6eh8RzQ+djFWlTyWe8Db7sScqnugG0ZX\n4L+4XNCd2hIASw+Qs0qzlj0+5+923K0oijf6Qgmnd24WHZ/hxx118OBmecrvCm3q\nB+Vvwh3wqIZZWC6S/bn1r/GVNGC86LdtZcoGgwvKndOglqrO/91IY9TxdsEtMLBZ\nX3nlZ/ZE6kNb5XSYOTsfuGaKSmobXNmR2Pa4dCP2XBMNpEb+mRQcL3F1pykdG33+\nNgHuxBdFzhNUugyE2AylbagFFYU866PBNqDNq+CkZR1GUciDo4e5JRs3R075PST0\nJ56o5LqLgilWjaTjdGtoggMrc45cuInw+KdCrN9KPADmQ8u7nQKCAQEA7MbBU/Cw\nIsXgGO0diS8g8eTeYiMKrKzVDnC6+oEYMqCQ4kHa473wtsHFkU5wSJ4LRaCKhxqB\n0UdeCLfbYNF/TFgyqj4fsonwpwXKOvta9ApuDGxAIuO3AfsR15aK/s0hJgczOehd\nXjAdzTt8njo7BEa2RTtpw9C3/b617CEwNeMsEU0mcxjFRfN9AWpUCPiFWC5dQkLx\nXYIqNQFYnDcMSyYA4gU3dU/XUhQHFCA928n3lAv+Y55d8D9f3sua9eTFImvoMqbG\nsTAmfhOkDHU8K/ZEkyeEf+MVZTObXbo2h7exV44Bpzszd2uzP/ifL8A702H+gkTI\nx5Vx1ShPGFVtnQKCAQEAxTBdildvCptkfGvM2r5PBrV+pUkeTKROiQoMDqx0DUsD\nXvPgNdw3Y005hZPUPmc8OEpgDWLujU9QJMed0cz6tDUpVvbmg/wWqkBKW358WNpT\nwrqBDZpp7dy+r4f+RU5uRSnohVI/sdvXenfTOaUsJCt3tzz6adSGini6cV/CS1ZB\n4hjr1uzfgMYdkyPCPv3IzbJMkPCwSuIlzy7xHIgau7SsxaJkEMKY6MuPjwICjIie\nMsdcYsq+2gPfSkSce7gXBWIqthbvO5wibezvK++NscgWQJOXVsfvhDoZuSJv7JYL\nvQkDEavD8XUwS+5TQ7cEdQOaefRiC0nGI/MRY6MIWwKCAQEA50GEh0B4N64+HvpQ\nM7GlIrzfesgw38/6U51E8e/uO1xZxbJ+YJseJNGC7lXHExXRepK/Wg9Ipadnr2or\nSRe0llM6QM5mVRGmyuq7xvnCrsIVE+sShUEVeEjI92CgpxagfqzfmnpZ3ult4gps\nv3muNJH/6gSKW2danOmMc3FCzg7v9oH8wMN7wcN730sDgrOAQU0fZt1NMoA0Swri\nici2KBuZFyfzL4/ohNDszPpkkGtldOJ19sQxibnMICWKVwm2hNURt47uVxbpMZBr\nVIOYXnOsLHGXVvTq66oI3/VE1y9RdWtMMBKBRO0a6KsihxfhFqcKVh9qXoyW3pL9\nFY4yfQKCAQEAo96IrdHxXrA6dsejS1mCFQI5SCSWgHy/I1VtJbidDPr8xykGd9wV\nT3m6xs5OQLnMKr9sSVOhCWATKy2UcujUo3PWcgXnFWImd7/Ly3M25SQzHY2ed+6N\n/3VGJlturKv8KZAlQr5SXn1i+cyghbRFFZ3l9CpBtDdAw8yGrHc24SLCx6GMRGU/\nbCW4KDI8V9rRyFdXF9BdwS1k3GOXTe2xwbHpLqjAXclKB5Q7Lk4uThmu1mYk2ZjB\nuC4lnvOb05jt0LR+6CnxD+m2FwIi1LFUYJz2GR5OU3j+DDW4cMk0mDktciAOI95J\njNylkURyf8CVfKTM+HJmFBlwByoVlmbDJwKCAQEApPUYDivNA65Nr+1dUjkhVm2L\nrhrcXW4lkl+NTBfeEI7pVYH6NTilNATL3hidPzvAhGZWJiCTy1v6ANkod84JMeFE\nvSXzB6JAH0Tt2etZm9yNJvSPNJE8igsbsmZC4lpKmBdgzhu90P7p+xXm3D8JDdpC\nnxczaHtcky1wyZfkbr9RH4z2DRPGMpxX31btGdSfuUzpqBnc1yfmMJPoys+PDucX\n+hQs10HhxM7HCKn5agwFFloL568JSEZ3ngKZdi+x5zpHewGQYy0vbajvmbtJRdZO\no0kBYdtvGWsCLiCNBX8r1QRi/zbgv8cULvFTtX2EuJlqcCJFYf1y10J959unZQ==\n-----END RSA PRIVATE KEY-----\n"}
2026-04-16T19:20:57.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:56 vm01 bash[28222]: cephadm 2026-04-16T19:20:55.964108+0000 mgr.vm01.nwhpas (mgr.14170) 3 : cephadm [INF] [16/Apr/2026:19:20:55] ENGINE Bus STARTING
2026-04-16T19:20:57.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:56 vm01 bash[28222]: cephadm 2026-04-16T19:20:56.066916+0000 mgr.vm01.nwhpas (mgr.14170) 4 : cephadm [INF] [16/Apr/2026:19:20:56] ENGINE Serving on http://192.168.123.101:8765
2026-04-16T19:20:57.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:56 vm01 bash[28222]: cephadm 2026-04-16T19:20:56.178034+0000 mgr.vm01.nwhpas (mgr.14170) 5 : cephadm [INF] [16/Apr/2026:19:20:56] ENGINE Serving on https://192.168.123.101:7150
2026-04-16T19:20:57.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:56 vm01 bash[28222]: cephadm 2026-04-16T19:20:56.178136+0000 mgr.vm01.nwhpas (mgr.14170) 6 : cephadm [INF] [16/Apr/2026:19:20:56] ENGINE Bus STARTED
2026-04-16T19:20:57.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:56 vm01 bash[28222]: cephadm 2026-04-16T19:20:56.178411+0000 mgr.vm01.nwhpas (mgr.14170) 7 : cephadm [INF] [16/Apr/2026:19:20:56] ENGINE Client ('192.168.123.101', 44508) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-16T19:20:57.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:56 vm01 bash[28222]: audit 2026-04-16T19:20:56.273715+0000 mgr.vm01.nwhpas (mgr.14170) 8 : audit [DBG] from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch certmgr generate-certificates", "module_name": "dashboard", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:57.278 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout SSL certificate updated
2026-04-16T19:20:57.736 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout SSL certificate key updated
2026-04-16T19:20:57.736 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial admin user...
2026-04-16T19:20:57.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:57 vm01 bash[28222]: cluster 2026-04-16T19:20:56.876921+0000 mon.vm01 (mon.0) 98 : cluster [DBG] mgrmap e12: vm01.nwhpas(active, since 2s)
2026-04-16T19:20:57.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:57 vm01 bash[28222]: audit 2026-04-16T19:20:57.224794+0000 mgr.vm01.nwhpas (mgr.14170) 9 : audit [DBG] from='client.14184 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:57.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:57 vm01 bash[28222]: audit 2026-04-16T19:20:57.228509+0000 mon.vm01 (mon.0) 99 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:57.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:57 vm01 bash[28222]: audit 2026-04-16T19:20:57.670976+0000 mon.vm01 (mon.0) 100 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:58.401 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$vavG4hZMagLnt3pGovRnKe52YqOlmv28UqM5rym9wxyVK2nYBEO8G", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1776367258, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-04-16T19:20:58.402 INFO:teuthology.orchestra.run.vm01.stdout:Fetching dashboard port number...
2026-04-16T19:20:58.831 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 8443
2026-04-16T19:20:58.831 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present
2026-04-16T19:20:58.831 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout:Ceph Dashboard is now available at:
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout: URL: https://vm01.local:8443/
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout: User: admin
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout: Password: 4twq0ps3z9
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:58.833 INFO:teuthology.orchestra.run.vm01.stdout:Saving cluster configuration to /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config directory
2026-04-16T19:20:59.338 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-04-16T19:20:59.338 INFO:teuthology.orchestra.run.vm01.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-04-16T19:20:59.338 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:Or, if you are only running a single cluster on this host:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout: sudo /home/ubuntu/cephtest/cephadm shell
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:Please consider enabling telemetry to help improve Ceph:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout: ceph telemetry on
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:For more information see:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:20:59.339 INFO:teuthology.orchestra.run.vm01.stdout:Bootstrap complete.
2026-04-16T19:20:59.344 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout static
2026-04-16T19:20:59.347 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active logrotate
2026-04-16T19:20:59.347 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive
2026-04-16T19:20:59.347 INFO:teuthology.orchestra.run.vm01.stdout:Enabling the logrotate.timer service to perform daily log rotation.
2026-04-16T19:20:59.573 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:59 vm01 bash[28222]: audit 2026-04-16T19:20:57.666682+0000 mgr.vm01.nwhpas (mgr.14170) 10 : audit [DBG] from='client.14186 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:59.573 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:59 vm01 bash[28222]: audit 2026-04-16T19:20:58.109964+0000 mgr.vm01.nwhpas (mgr.14170) 11 : audit [DBG] from='client.14188 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:20:59.573 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:59 vm01 bash[28222]: audit 2026-04-16T19:20:58.303790+0000 mon.vm01 (mon.0) 101 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:20:59.573 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:59 vm01 bash[28222]: audit 2026-04-16T19:20:58.782030+0000 mon.vm01 (mon.0) 102 : audit [DBG] from='client.? 192.168.123.101:0/2294032979' entity='client.admin' cmd={"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"} : dispatch
2026-04-16T19:20:59.573 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:59 vm01 bash[28222]: audit 2026-04-16T19:20:59.268984+0000 mon.vm01 (mon.0) 103 : audit [INF] from='client.? 192.168.123.101:0/2232701843' entity='client.admin'
2026-04-16T19:20:59.573 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:20:59 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:20:59.579 INFO:tasks.cephadm:Fetching config...
2026-04-16T19:20:59.579 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:20:59.579 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-04-16T19:20:59.582 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-04-16T19:20:59.582 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:20:59.582 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-04-16T19:20:59.626 INFO:tasks.cephadm:Fetching mon keyring...
2026-04-16T19:20:59.626 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:20:59.626 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/keyring of=/dev/stdout
2026-04-16T19:20:59.676 INFO:tasks.cephadm:Fetching pub ssh key...
2026-04-16T19:20:59.676 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:20:59.676 DEBUG:teuthology.orchestra.run.vm01:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-04-16T19:20:59.723 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-04-16T19:20:59.723 DEBUG:teuthology.orchestra.run.vm01:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkBQTQO169TZqfttYYufnyFLyqPwEq4wjbDZ7THNt61GVTMjGHb7iEqVBuanqz2bvmgyU5Ki0+4kkp9KZ/JQh7t9tsOghZOwhSKXOwPRdyg7rH/Iq26nXa3vQrI8GMCfNVh0iRDxWeHuvH+4hiOtTtT9Zj+7ZrEfGD11CHW1WeuDg4W4QjPWTgM8Qhg5081nS4EfATk/e3gibraErvm/FS5nZZa2FyQ3KtzxWtJNQaKT8+eBF3ltRWhcBrYzgWhq+lBRlCNvI1UoqlaQxSlMalUJ2ooaCOk5wwCIeyJZGv0y3IgANZpQzD7h3n/4ufVNzpQYsNCXh5VhCU3AiHxYNUkXpTdB+2Ceno8pN5lyPb1UGnNmuCabt6/h/VqKn/hhb/adcxt3S4q/bPX5dLGH85Wzd0ZWwFdFR5yxHP8E0RuU2vlLnOzT6FNrytZzecnADYAnkJ4lRzH7IgHnhS/l/ifIT3B9RkERtAjWnkuE0O3nxg1Iappa4VCjK99xWcZzk= ceph-3711bb6a-39c9-11f1-9688-8928648d55a6' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-04-16T19:20:59.775 INFO:teuthology.orchestra.run.vm01.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkBQTQO169TZqfttYYufnyFLyqPwEq4wjbDZ7THNt61GVTMjGHb7iEqVBuanqz2bvmgyU5Ki0+4kkp9KZ/JQh7t9tsOghZOwhSKXOwPRdyg7rH/Iq26nXa3vQrI8GMCfNVh0iRDxWeHuvH+4hiOtTtT9Zj+7ZrEfGD11CHW1WeuDg4W4QjPWTgM8Qhg5081nS4EfATk/e3gibraErvm/FS5nZZa2FyQ3KtzxWtJNQaKT8+eBF3ltRWhcBrYzgWhq+lBRlCNvI1UoqlaQxSlMalUJ2ooaCOk5wwCIeyJZGv0y3IgANZpQzD7h3n/4ufVNzpQYsNCXh5VhCU3AiHxYNUkXpTdB+2Ceno8pN5lyPb1UGnNmuCabt6/h/VqKn/hhb/adcxt3S4q/bPX5dLGH85Wzd0ZWwFdFR5yxHP8E0RuU2vlLnOzT6FNrytZzecnADYAnkJ4lRzH7IgHnhS/l/ifIT3B9RkERtAjWnkuE0O3nxg1Iappa4VCjK99xWcZzk= ceph-3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:59.781 DEBUG:teuthology.orchestra.run.vm04:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkBQTQO169TZqfttYYufnyFLyqPwEq4wjbDZ7THNt61GVTMjGHb7iEqVBuanqz2bvmgyU5Ki0+4kkp9KZ/JQh7t9tsOghZOwhSKXOwPRdyg7rH/Iq26nXa3vQrI8GMCfNVh0iRDxWeHuvH+4hiOtTtT9Zj+7ZrEfGD11CHW1WeuDg4W4QjPWTgM8Qhg5081nS4EfATk/e3gibraErvm/FS5nZZa2FyQ3KtzxWtJNQaKT8+eBF3ltRWhcBrYzgWhq+lBRlCNvI1UoqlaQxSlMalUJ2ooaCOk5wwCIeyJZGv0y3IgANZpQzD7h3n/4ufVNzpQYsNCXh5VhCU3AiHxYNUkXpTdB+2Ceno8pN5lyPb1UGnNmuCabt6/h/VqKn/hhb/adcxt3S4q/bPX5dLGH85Wzd0ZWwFdFR5yxHP8E0RuU2vlLnOzT6FNrytZzecnADYAnkJ4lRzH7IgHnhS/l/ifIT3B9RkERtAjWnkuE0O3nxg1Iappa4VCjK99xWcZzk= ceph-3711bb6a-39c9-11f1-9688-8928648d55a6' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-04-16T19:20:59.795 INFO:teuthology.orchestra.run.vm04.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkBQTQO169TZqfttYYufnyFLyqPwEq4wjbDZ7THNt61GVTMjGHb7iEqVBuanqz2bvmgyU5Ki0+4kkp9KZ/JQh7t9tsOghZOwhSKXOwPRdyg7rH/Iq26nXa3vQrI8GMCfNVh0iRDxWeHuvH+4hiOtTtT9Zj+7ZrEfGD11CHW1WeuDg4W4QjPWTgM8Qhg5081nS4EfATk/e3gibraErvm/FS5nZZa2FyQ3KtzxWtJNQaKT8+eBF3ltRWhcBrYzgWhq+lBRlCNvI1UoqlaQxSlMalUJ2ooaCOk5wwCIeyJZGv0y3IgANZpQzD7h3n/4ufVNzpQYsNCXh5VhCU3AiHxYNUkXpTdB+2Ceno8pN5lyPb1UGnNmuCabt6/h/VqKn/hhb/adcxt3S4q/bPX5dLGH85Wzd0ZWwFdFR5yxHP8E0RuU2vlLnOzT6FNrytZzecnADYAnkJ4lRzH7IgHnhS/l/ifIT3B9RkERtAjWnkuE0O3nxg1Iappa4VCjK99xWcZzk= ceph-3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:20:59.801 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-04-16T19:21:00.113 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:00.618 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-04-16T19:21:00.618 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-04-16T19:21:00.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:00 vm01 bash[28222]: audit 2026-04-16T19:20:59.869528+0000 mon.vm01 (mon.0) 104 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:00.871 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:00 vm01 bash[28222]: audit 2026-04-16T19:21:00.544659+0000 mon.vm01 (mon.0) 105 : audit [INF] from='client.? 192.168.123.101:0/4081952867' entity='client.admin'
2026-04-16T19:21:00.872 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:00 vm01 bash[28222]: audit 2026-04-16T19:21:00.568620+0000 mon.vm01 (mon.0) 106 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:00.896 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:01.380 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm04
2026-04-16T19:21:01.381 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-16T19:21:01.381 DEBUG:teuthology.orchestra.run.vm04:> dd of=/etc/ceph/ceph.conf
2026-04-16T19:21:01.384 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-16T19:21:01.384 DEBUG:teuthology.orchestra.run.vm04:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-16T19:21:01.429 INFO:tasks.cephadm:Adding host vm04 to orchestrator...
2026-04-16T19:21:01.429 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch host add vm04
2026-04-16T19:21:01.734 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:02.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:02 vm01 bash[28222]: audit 2026-04-16T19:21:01.303501+0000 mgr.vm01.nwhpas (mgr.14170) 12 : audit [DBG] from='client.14196 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:21:02.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:02 vm01 bash[28222]: audit 2026-04-16T19:21:01.307641+0000 mon.vm01 (mon.0) 107 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:02.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:02 vm01 bash[28222]: cluster 2026-04-16T19:21:01.573617+0000 mon.vm01 (mon.0) 108 : cluster [DBG] mgrmap e13: vm01.nwhpas(active, since 6s)
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.338831+0000 mgr.vm01.nwhpas (mgr.14170) 13 : audit [DBG] from='client.14198 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm04", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.512542+0000 mon.vm01 (mon.0) 109 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.515541+0000 mon.vm01 (mon.0) 110 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.516289+0000 mon.vm01 (mon.0) 111 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.517245+0000 mon.vm01 (mon.0) 112 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.517966+0000 mon.vm01 (mon.0) 113 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.665906+0000 mon.vm01 (mon.0) 114 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.669162+0000 mon.vm01 (mon.0) 115 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.672961+0000 mon.vm01 (mon.0) 116 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.674312+0000 mon.vm01 (mon.0) 117 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.675552+0000 mon.vm01 (mon.0) 118 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 bash[28222]: audit 2026-04-16T19:21:02.677723+0000 mon.vm01 (mon.0) 119 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:03.496 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:03 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:02.518860+0000 mgr.vm01.nwhpas (mgr.14170) 14 : cephadm [INF] Updating vm01:/etc/ceph/ceph.conf
2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:02.557412+0000 mgr.vm01.nwhpas (mgr.14170) 15 : cephadm [INF] Updating vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:02.590993+0000 mgr.vm01.nwhpas (mgr.14170) 16 : cephadm [INF] Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:02.626603+0000 mgr.vm01.nwhpas (mgr.14170) 17 : cephadm [INF] Updating vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.client.admin.keyring
2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:02.678429+0000 mgr.vm01.nwhpas (mgr.14170) 18 : cephadm [INF] Deploying daemon ceph-exporter.vm01 on vm01
2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:03.265300+0000 mgr.vm01.nwhpas (mgr.14170) 19 : 
cephadm [INF] Deploying cephadm binary to vm04 2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: cephadm 2026-04-16T19:21:03.265300+0000 mgr.vm01.nwhpas (mgr.14170) 19 : cephadm [INF] Deploying cephadm binary to vm04 2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.975079+0000 mon.vm01 (mon.0) 120 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.975079+0000 mon.vm01 (mon.0) 120 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.978425+0000 mon.vm01 (mon.0) 121 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.547 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.978425+0000 mon.vm01 (mon.0) 121 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.982961+0000 mon.vm01 (mon.0) 122 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.982961+0000 mon.vm01 (mon.0) 122 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.985285+0000 mon.vm01 (mon.0) 123 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.985285+0000 mon.vm01 (mon.0) 123 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.985998+0000 mon.vm01 (mon.0) 124 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.985998+0000 mon.vm01 (mon.0) 124 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.987099+0000 mon.vm01 (mon.0) 125 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.987099+0000 mon.vm01 (mon.0) 125 : audit [INF] from='mgr.14170 
192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.988584+0000 mon.vm01 (mon.0) 126 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:21:04.548 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 bash[28222]: audit 2026-04-16T19:21:03.988584+0000 mon.vm01 (mon.0) 126 : audit [DBG] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:21:04.835 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:21:04.835 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:04 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:21:05.322 INFO:teuthology.orchestra.run.vm01.stdout:Added host 'vm04' with addr '192.168.123.104' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: cephadm 2026-04-16T19:21:03.991057+0000 mgr.vm01.nwhpas (mgr.14170) 20 : cephadm [INF] Deploying daemon crash.vm01 on vm01 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: cephadm 2026-04-16T19:21:03.991057+0000 mgr.vm01.nwhpas (mgr.14170) 20 : cephadm [INF] Deploying daemon crash.vm01 on vm01 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.870743+0000 mon.vm01 (mon.0) 127 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.870743+0000 mon.vm01 (mon.0) 127 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.874231+0000 mon.vm01 (mon.0) 128 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.874231+0000 mon.vm01 (mon.0) 128 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.877652+0000 mon.vm01 (mon.0) 129 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.877652+0000 mon.vm01 (mon.0) 129 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.880331+0000 mon.vm01 (mon.0) 130 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:04.880331+0000 mon.vm01 (mon.0) 130 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:05.095203+0000 mon.vm01 (mon.0) 131 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 bash[28222]: audit 2026-04-16T19:21:05.095203+0000 mon.vm01 (mon.0) 131 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:05.341 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:21:05.459 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch host ls --format=json 2026-04-16T19:21:05.637 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:05 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
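Aside: every orchestrator call in this run goes through the same wrapper visible in the DEBUG lines above: cephadm shell -c <conf> -k <keyring> --fsid <fsid> -- ceph <subcommand>. A minimal Python sketch of how such an invocation can be composed and its JSON output consumed (a hypothetical helper for illustration, not teuthology's actual code; the image, fsid, and paths are taken from the log):

    import json
    import subprocess

    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7"
    FSID = "3711bb6a-39c9-11f1-9688-8928648d55a6"

    def cephadm_shell(*ceph_args: str) -> str:
        # Same shape as the DEBUG lines above:
        #   sudo cephadm --image <image> shell -c <conf> -k <keyring> --fsid <fsid> -- ceph ...
        cmd = [
            "sudo", "/home/ubuntu/cephtest/cephadm",
            "--image", IMAGE,
            "shell",
            "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID,
            "--", "ceph", *ceph_args,
        ]
        return subprocess.run(cmd, capture_output=True, text=True, check=True).stdout

    # e.g. the host listing issued just above:
    #   hosts = json.loads(cephadm_shell("orch", "host", "ls", "--format=json"))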
2026-04-16T19:21:05.741 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:06.203 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:21:06.203 INFO:teuthology.orchestra.run.vm01.stdout:[{"addr": "192.168.123.101", "hostname": "vm01", "labels": [], "status": ""}, {"addr": "192.168.123.104", "hostname": "vm04", "labels": [], "status": ""}]
2026-04-16T19:21:06.266 INFO:tasks.cephadm:Setting crush tunables to default
2026-04-16T19:21:06.266 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd crush tunables default
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: cephadm 2026-04-16T19:21:04.881069+0000 mgr.vm01.nwhpas (mgr.14170) 21 : cephadm [INF] Deploying daemon node-exporter.vm01 on vm01
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: audit 2026-04-16T19:21:05.321762+0000 mon.vm01 (mon.0) 132 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: cephadm 2026-04-16T19:21:05.322194+0000 mgr.vm01.nwhpas (mgr.14170) 22 : cephadm [INF] Added host vm04
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: audit 2026-04-16T19:21:05.849450+0000 mon.vm01 (mon.0) 133 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: audit 2026-04-16T19:21:05.852386+0000 mon.vm01 (mon.0) 134 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: audit 2026-04-16T19:21:05.854868+0000 mon.vm01 (mon.0) 135 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:06.513 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:06 vm01 bash[28222]: audit 2026-04-16T19:21:05.857069+0000 mon.vm01 (mon.0) 136 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:06.524 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:07.332 INFO:teuthology.orchestra.run.vm01.stderr:adjusted tunables profile to default
2026-04-16T19:21:07.435 INFO:tasks.cephadm:Adding mon.vm01 on vm01
2026-04-16T19:21:07.435 INFO:tasks.cephadm:Adding mon.vm04 on vm04
2026-04-16T19:21:07.435 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch apply mon '2;vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04'
2026-04-16T19:21:07.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:07 vm01 bash[28222]: cephadm 2026-04-16T19:21:05.861905+0000 mgr.vm01.nwhpas (mgr.14170) 23 : cephadm [INF] Deploying daemon alertmanager.vm01 on vm01
2026-04-16T19:21:07.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:07 vm01 bash[28222]: audit 2026-04-16T19:21:06.200959+0000 mgr.vm01.nwhpas (mgr.14170) 24 : audit [DBG] from='client.14201 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:21:07.600 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:07 vm01 bash[28222]: audit 2026-04-16T19:21:06.885027+0000 mon.vm01 (mon.0) 137 : audit [INF] from='client.? 192.168.123.101:0/714351319' entity='client.admin' cmd={"prefix": "osd crush tunables", "profile": "default"} : dispatch
2026-04-16T19:21:07.718 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:08.143 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled mon update...
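Aside: the placement argument passed to `ceph orch apply mon` above ('2;vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04') bundles a target count with explicit host:addr=name entries, and the mgr echoes it back below as "placement vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04;count:2". A toy parser for exactly this count-plus-hosts form (illustrative only; the authoritative parsing is cephadm's own PlacementSpec handling in the mgr, and the field names below are assumptions):

    def parse_mon_placement(spec: str):
        count = None
        hosts = []
        for part in spec.split(";"):
            if part.isdigit():
                count = int(part)  # leading "2": desired number of mons
            else:
                host_addr, _, name = part.partition("=")   # "vm01:192.168.123.101=vm01"
                host, _, addr = host_addr.partition(":")
                hosts.append({"host": host, "addr": addr, "name": name})
        return count, hosts

    count, hosts = parse_mon_placement("2;vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04")
    assert count == 2 and hosts == [
        {"host": "vm01", "addr": "192.168.123.101", "name": "vm01"},
        {"host": "vm04", "addr": "192.168.123.104", "name": "vm04"},
    ]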
2026-04-16T19:21:08.225 DEBUG:teuthology.orchestra.run.vm04:mon.vm04> sudo journalctl -f -n 0 -u ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm04.service
2026-04-16T19:21:08.226 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:08.226 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:08 vm01 bash[28222]: audit 2026-04-16T19:21:07.331988+0000 mon.vm01 (mon.0) 138 : audit [INF] from='client.? 192.168.123.101:0/714351319' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-04-16T19:21:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:08 vm01 bash[28222]: cluster 2026-04-16T19:21:07.333875+0000 mon.vm01 (mon.0) 139 : cluster [DBG] osdmap e4: 0 total, 0 up, 0 in
2026-04-16T19:21:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:08 vm01 bash[28222]: audit 2026-04-16T19:21:08.141697+0000 mon.vm01 (mon.0) 140 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:08.533 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:09.019 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:09.019 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:09.019 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:09.132 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:09 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:21:09.408 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:09 vm01 bash[28222]: audit 2026-04-16T19:21:08.136906+0000 mgr.vm01.nwhpas (mgr.14170) 25 : audit [DBG] from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:21:09.408 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:09 vm01 bash[28222]: cephadm 2026-04-16T19:21:08.138296+0000 mgr.vm01.nwhpas (mgr.14170) 26 : cephadm [INF] Saving service mon spec with placement vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04;count:2
2026-04-16T19:21:09.408 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:09 vm01 bash[28222]: audit 2026-04-16T19:21:09.018207+0000 mon.vm01 (mon.0) 141 : audit [DBG] from='client.? 192.168.123.104:0/1218969352' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:10.149 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
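Aside: the "Waiting for 2 mons in monmap..." lines that follow are one poll loop: run `ceph mon dump -f json` through the shell wrapper, count the entries under "mons", and retry until mon.vm04 joins. A simplified sketch of that logic (the real implementation lives in teuthology's tasks.cephadm; cephadm_shell is the hypothetical helper sketched earlier, and the 2-second interval is an assumption, not the tool's actual setting):

    import json
    import time

    def wait_for_mons(expected: int = 2, interval: float = 2.0) -> dict:
        while True:
            monmap = json.loads(cephadm_shell("mon", "dump", "-f", "json"))
            if len(monmap["mons"]) >= expected:
                return monmap
            # every dump below still shows epoch 1 with only vm01, so keep polling
            time.sleep(interval)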
2026-04-16T19:21:10.149 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:10.431 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:09.396210+0000 mon.vm01 (mon.0) 142 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:09.400393+0000 mon.vm01 (mon.0) 143 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:09.403669+0000 mon.vm01 (mon.0) 144 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:09.406365+0000 mon.vm01 (mon.0) 145 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: cephadm 2026-04-16T19:21:09.408072+0000 mgr.vm01.nwhpas (mgr.14170) 27 : cephadm [INF] Generating cephadm-signed certificates for grafana_cert/grafana_key
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.099638+0000 mon.vm01 (mon.0) 146 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.340210+0000 mon.vm01 (mon.0) 147 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.670 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.344387+0000 mon.vm01 (mon.0) 148 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.671 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.349158+0000 mon.vm01 (mon.0) 149 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.671 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.351215+0000 mon.vm01 (mon.0) 150 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.671 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.354952+0000 mon.vm01 (mon.0) 151 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-16T19:21:10.671 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:10 vm01 bash[28222]: audit 2026-04-16T19:21:10.357380+0000 mon.vm01 (mon.0) 152 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:10.849 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:10.849 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:10.849 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:11.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:11 vm01 bash[28222]: audit 2026-04-16T19:21:10.355311+0000 mgr.vm01.nwhpas (mgr.14170) 28 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-16T19:21:11.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:11 vm01 bash[28222]: cephadm 2026-04-16T19:21:10.366825+0000 mgr.vm01.nwhpas (mgr.14170) 29 : cephadm [INF] Deploying daemon grafana.vm01 on vm01
2026-04-16T19:21:11.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:11 vm01 bash[28222]: audit 2026-04-16T19:21:10.848326+0000 mon.vm01 (mon.0) 153 : audit [DBG] from='client.? 192.168.123.104:0/3605315952' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:11.913 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:11.913 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:12.183 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:12.603 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:12.603 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:12.603 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:12.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:12 vm01 bash[28222]: audit 2026-04-16T19:21:12.602951+0000 mon.vm01 (mon.0) 154 : audit [DBG] from='client.? 192.168.123.104:0/2000104817' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:13.665 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:13.665 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:13.941 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:14.389 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:14.390 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:14.390 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:14.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:14 vm01 bash[28222]: audit 2026-04-16T19:21:14.389122+0000 mon.vm01 (mon.0) 155 : audit [DBG] from='client.? 192.168.123.104:0/3768523410' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:15.550 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:15.550 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:15.818 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:16.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:15 vm01 bash[28222]: cluster 2026-04-16T19:21:14.818215+0000 mgr.vm01.nwhpas (mgr.14170) 30 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:16.250 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:16.250 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:16.250 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:17.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:16 vm01 bash[28222]: audit 2026-04-16T19:21:16.249660+0000 mon.vm01 (mon.0) 156 : audit [DBG] from='client.? 192.168.123.104:0/2072753631' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:17.308 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:17.309 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:17.578 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:18.443 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:18.444 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:18.444 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:18.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:18 vm01 bash[28222]: cluster 2026-04-16T19:21:16.818398+0000 mgr.vm01.nwhpas (mgr.14170) 31 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:19.503 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:19 vm01 bash[28222]: audit 2026-04-16T19:21:18.442686+0000 mon.vm01 (mon.0) 157 : audit [DBG] from='client.? 192.168.123.104:0/2378666514' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:19.503 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:19 vm01 bash[28222]: cluster 2026-04-16T19:21:18.818649+0000 mgr.vm01.nwhpas (mgr.14170) 32 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:19.518 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:19.519 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:19.757 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:19 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:21:19.774 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:20.182 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:20.182 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:20.182 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
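Aside: systemd flags the cephadm-generated unit template's KillMode=none on every daemon start, and the warning itself names the fix ('mixed' or 'control-group'). For illustration only, a sketch of the standard drop-in mechanism that would apply the suggested setting (hypothetical: cephadm generates and owns ceph-<fsid>@.service, so a hand-written override can fight the orchestrator and the real fix belongs in the unit template cephadm ships):

    from pathlib import Path

    UNIT = "ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service"

    # Standard systemd drop-in location: /etc/systemd/system/<unit>.d/*.conf
    dropin = Path(f"/etc/systemd/system/{UNIT}.d/10-killmode.conf")
    dropin.parent.mkdir(parents=True, exist_ok=True)
    dropin.write_text("[Service]\nKillMode=mixed\n")
    # then: systemctl daemon-reload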
2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.881453+0000 mon.vm01 (mon.0) 158 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.881453+0000 mon.vm01 (mon.0) 158 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.884954+0000 mon.vm01 (mon.0) 159 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.884954+0000 mon.vm01 (mon.0) 159 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.888194+0000 mon.vm01 (mon.0) 160 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.888194+0000 mon.vm01 (mon.0) 160 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.891768+0000 mon.vm01 (mon.0) 161 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.891768+0000 mon.vm01 (mon.0) 161 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.895277+0000 mon.vm01 (mon.0) 162 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.895277+0000 mon.vm01 (mon.0) 162 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.899716+0000 mon.vm01 (mon.0) 163 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.899716+0000 mon.vm01 (mon.0) 163 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.904182+0000 mon.vm01 (mon.0) 164 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.904182+0000 mon.vm01 (mon.0) 164 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.907962+0000 
mon.vm01 (mon.0) 165 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:19.907962+0000 mon.vm01 (mon.0) 165 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: cephadm 2026-04-16T19:21:20.072171+0000 mgr.vm01.nwhpas (mgr.14170) 33 : cephadm [INF] Deploying daemon prometheus.vm01 on vm01 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: cephadm 2026-04-16T19:21:20.072171+0000 mgr.vm01.nwhpas (mgr.14170) 33 : cephadm [INF] Deploying daemon prometheus.vm01 on vm01 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:20.104391+0000 mon.vm01 (mon.0) 166 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:20.104391+0000 mon.vm01 (mon.0) 166 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:20.181296+0000 mon.vm01 (mon.0) 167 : audit [DBG] from='client.? 192.168.123.104:0/3391701038' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-16T19:21:20.972 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:20 vm01 bash[28222]: audit 2026-04-16T19:21:20.181296+0000 mon.vm01 (mon.0) 167 : audit [DBG] from='client.? 192.168.123.104:0/3391701038' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-16T19:21:21.276 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-04-16T19:21:21.276 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:21.551 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:21.988 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:21.988 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:21.988 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:22.238 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:22 vm01 bash[28222]: cluster 2026-04-16T19:21:20.818875+0000 mgr.vm01.nwhpas (mgr.14170) 34 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:22.239 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:22 vm01 bash[28222]: audit 2026-04-16T19:21:21.987493+0000 mon.vm01 (mon.0) 168 : audit [DBG] from='client.? 192.168.123.104:0/3492712705' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:23.057 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
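The poll above repeats until the monmap lists both mons: tasks.cephadm runs `ceph mon dump -f json` through the remote cephadm shell, counts the entries under "mons", and retries while the count is short. A minimal sketch of that wait loop, assuming a host with a working `ceph` CLI (the helper name and retry cadence are illustrative, not the suite's actual code):

    import json
    import subprocess
    import time

    def wait_for_mons(expected: int, timeout: float = 300.0) -> None:
        # Illustrative stand-in for the tasks.cephadm wait seen in this log;
        # the real task runs the same command via `cephadm shell` remotely.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.run(
                ["sudo", "ceph", "mon", "dump", "-f", "json"],
                check=True, capture_output=True, text=True,
            ).stdout
            # The monmap JSON (as dumped above) carries one entry per mon.
            if len(json.loads(out)["mons"]) >= expected:
                return
            print(f"Waiting for {expected} mons in monmap...")
            time.sleep(2)
        raise TimeoutError(f"monmap never reached {expected} mons")

    wait_for_mons(2)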
2026-04-16T19:21:23.057 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:23.335 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:23.773 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:23.773 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:23.773 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:24.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:24 vm01 bash[28222]: cluster 2026-04-16T19:21:22.819064+0000 mgr.vm01.nwhpas (mgr.14170) 35 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:24.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:24 vm01 bash[28222]: audit 2026-04-16T19:21:23.772332+0000 mon.vm01 (mon.0) 169 : audit [DBG] from='client.? 192.168.123.104:0/2558060514' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:24.848 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:24.849 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:25.102 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:25.523 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:25.523 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:25.523 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:25.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:25 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:21:26.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:26 vm01 bash[28222]: cluster 2026-04-16T19:21:24.819219+0000 mgr.vm01.nwhpas (mgr.14170) 36 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:26.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:26 vm01 bash[28222]: audit 2026-04-16T19:21:25.522606+0000 mon.vm01 (mon.0) 170 : audit [DBG] from='client.? 192.168.123.104:0/2521455909' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:26.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:26 vm01 bash[28222]: audit 2026-04-16T19:21:25.740889+0000 mon.vm01 (mon.0) 171 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:26.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:26 vm01 bash[28222]: audit 2026-04-16T19:21:25.744252+0000 mon.vm01 (mon.0) 172 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:26.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:26 vm01 bash[28222]: audit 2026-04-16T19:21:25.747333+0000 mon.vm01 (mon.0) 173 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:26.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:26 vm01 bash[28222]: audit 2026-04-16T19:21:25.749031+0000 mon.vm01 (mon.0) 174 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch
2026-04-16T19:21:26.597 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:26.597 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:26.881 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:27.315 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:27.316 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:27.316 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:28.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:27 vm01 bash[28222]: audit 2026-04-16T19:21:26.749381+0000 mon.vm01 (mon.0) 175 : audit [INF] from='mgr.14170 192.168.123.101:0/2709727076' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-04-16T19:21:28.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:27 vm01 bash[28222]: cluster 2026-04-16T19:21:26.751920+0000 mon.vm01 (mon.0) 176 : cluster [DBG] mgrmap e14: vm01.nwhpas(active, since 31s)
2026-04-16T19:21:28.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:27 vm01 bash[28222]: audit 2026-04-16T19:21:27.315334+0000 mon.vm01 (mon.0) 177 : audit [DBG] from='client.? 192.168.123.104:0/643212608' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:28.382 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:28.383 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:28.645 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:29.057 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:29.057 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:29.057 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:29 vm01 bash[28222]: audit 2026-04-16T19:21:29.056884+0000 mon.vm01 (mon.0) 178 : audit [DBG] from='client.? 192.168.123.104:0/2360928510' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:30.134 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:30.134 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:30.408 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:30.819 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:30.819 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:30.819 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:31.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:30 vm01 bash[28222]: audit 2026-04-16T19:21:30.818826+0000 mon.vm01 (mon.0) 179 : audit [DBG] from='client.? 192.168.123.104:0/157617330' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:31.882 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:31.882 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:32.161 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:32.577 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:32.577 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:32.578 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:32.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:32 vm01 bash[28222]: audit 2026-04-16T19:21:32.577191+0000 mon.vm01 (mon.0) 180 : audit [DBG] from='client.? 192.168.123.104:0/1519937718' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:33.639 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:33.640 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:33.909 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:34.326 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:34.326 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:34.326 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:34.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:34 vm01 bash[28222]: audit 2026-04-16T19:21:34.325644+0000 mon.vm01 (mon.0) 181 : audit [DBG] from='client.? 192.168.123.104:0/3704336877' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:35.566 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:35.566 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:35.846 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:36.267 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:36.267 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:36.267 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:36.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:36 vm01 bash[28222]: audit 2026-04-16T19:21:36.266775+0000 mon.vm01 (mon.0) 182 : audit [DBG] from='client.? 192.168.123.104:0/185916968' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:37.356 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:37.356 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:37.615 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: cluster 2026-04-16T19:21:37.248255+0000 mon.vm01 (mon.0) 183 : cluster [INF] Active manager daemon vm01.nwhpas restarted
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: cluster 2026-04-16T19:21:37.248538+0000 mon.vm01 (mon.0) 184 : cluster [INF] Activating manager daemon vm01.nwhpas
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: cluster 2026-04-16T19:21:37.253854+0000 mon.vm01 (mon.0) 185 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: cluster 2026-04-16T19:21:37.253963+0000 mon.vm01 (mon.0) 186 : cluster [DBG] mgrmap e15: vm01.nwhpas(active, starting, since 0.00555594s)
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: audit 2026-04-16T19:21:37.256057+0000 mon.vm01 (mon.0) 187 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: audit 2026-04-16T19:21:37.256482+0000 mon.vm01 (mon.0) 188 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm01.nwhpas", "id": "vm01.nwhpas"} : dispatch
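The restart recorded above is consistent with the active mgr respawning to pick up the newly enabled prometheus module, after which the mon reactivates it ("Activating manager daemon", then "now available"). A quick way to confirm the respawned mgr is back, sketched as a hypothetical check against `ceph mgr dump` (assumes a host with a working `ceph` CLI; not part of the suite):

    import json
    import subprocess

    # The mgrmap reports the active daemon and whether it is available
    # again after the module-enable respawn observed in the log above.
    mgrmap = json.loads(subprocess.run(
        ["sudo", "ceph", "mgr", "dump", "-f", "json"],
        check=True, capture_output=True, text=True,
    ).stdout)
    state = "available" if mgrmap["available"] else "still starting"
    print(f"active mgr: {mgrmap['active_name']} ({state})")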
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: audit 2026-04-16T19:21:37.257172+0000 mon.vm01 (mon.0) 189 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mds metadata"} : dispatch
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: audit 2026-04-16T19:21:37.257298+0000 mon.vm01 (mon.0) 190 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata"} : dispatch
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: audit 2026-04-16T19:21:37.257412+0000 mon.vm01 (mon.0) 191 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata"} : dispatch
2026-04-16T19:21:37.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:37 vm01 bash[28222]: cluster 2026-04-16T19:21:37.262058+0000 mon.vm01 (mon.0) 192 : cluster [INF] Manager daemon vm01.nwhpas is now available
2026-04-16T19:21:38.082 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:38.082 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:38.083 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:37.535563+0000 mon.vm01 (mon.0) 193 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:21:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:37.541134+0000 mon.vm01 (mon.0) 194 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:37.558402+0000 mon.vm01 (mon.0) 195 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:21:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:37.558555+0000 mon.vm01 (mon.0) 196 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:21:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:37.565992+0000 mon.vm01 (mon.0) 197 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/mirror_snapshot_schedule"} : dispatch
2026-04-16T19:21:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:37.604806+0000 mon.vm01 (mon.0) 198 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm01.nwhpas/trash_purge_schedule"} : dispatch
2026-04-16T19:21:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:38.082144+0000 mon.vm01 (mon.0) 199 : audit [DBG] from='client.? 192.168.123.104:0/2714499018' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: audit 2026-04-16T19:21:38.163830+0000 mon.vm01 (mon.0) 200 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:38 vm01 bash[28222]: cluster 2026-04-16T19:21:38.258551+0000 mon.vm01 (mon.0) 201 : cluster [DBG] mgrmap e16: vm01.nwhpas(active, since 1.01014s)
2026-04-16T19:21:39.156 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:39.156 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:39.433 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:39.851 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:39.852 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:39.852 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: audit 2026-04-16T19:21:38.472080+0000 mon.vm01 (mon.0) 202 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: cephadm 2026-04-16T19:21:38.494070+0000 mgr.vm01.nwhpas (mgr.14227) 2 : cephadm [INF] [16/Apr/2026:19:21:38] ENGINE Bus STARTING
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: cephadm 2026-04-16T19:21:38.596590+0000 mgr.vm01.nwhpas (mgr.14227) 3 : cephadm [INF] [16/Apr/2026:19:21:38] ENGINE Serving on http://192.168.123.101:8765
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: cephadm 2026-04-16T19:21:38.705857+0000 mgr.vm01.nwhpas (mgr.14227) 4 : cephadm [INF] [16/Apr/2026:19:21:38] ENGINE Serving on https://192.168.123.101:7150
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: cephadm 2026-04-16T19:21:38.705900+0000 mgr.vm01.nwhpas (mgr.14227) 5 : cephadm [INF] [16/Apr/2026:19:21:38] ENGINE Bus STARTED
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: cephadm 2026-04-16T19:21:38.706339+0000 mgr.vm01.nwhpas (mgr.14227) 6 : cephadm [INF] [16/Apr/2026:19:21:38] ENGINE Client ('192.168.123.101', 44546) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-16T19:21:39.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:39 vm01 bash[28222]: audit 2026-04-16T19:21:39.085227+0000 mon.vm01 (mon.0) 203 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:40.921 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
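The "ENGINE Client ... lost" record above is CherryPy noting a peer that connected to the mgr's new HTTPS endpoint and closed before finishing the TLS handshake; availability probes behave this way, and the message is benign. A sketch that reproduces it against the endpoint from the log (address and port taken from the lines above; run from a host that can reach the mgr):

    import socket

    # A bare TCP connect that closes without sending any TLS bytes makes
    # the mgr's CherryPy engine log a handshake EOF like the one above.
    # Nothing is wrong server-side.
    with socket.create_connection(("192.168.123.101", 7150), timeout=5):
        pass  # close immediately, before any TLS handshake begins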
2026-04-16T19:21:40.921 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:40.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:40 vm01 bash[28222]: audit 2026-04-16T19:21:39.851207+0000 mon.vm01 (mon.0) 204 : audit [DBG] from='client.? 192.168.123.104:0/3385893197' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:40.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:40 vm01 bash[28222]: cluster 2026-04-16T19:21:40.089271+0000 mon.vm01 (mon.0) 205 : cluster [DBG] mgrmap e17: vm01.nwhpas(active, since 2s)
2026-04-16T19:21:41.205 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-16T19:21:41.626 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:41.626 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:41.626 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:41 vm01 bash[28222]: audit 2026-04-16T19:21:40.807436+0000 mon.vm01 (mon.0) 206 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:41 vm01 bash[28222]: audit 2026-04-16T19:21:40.810402+0000 mon.vm01 (mon.0) 207 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:41 vm01 bash[28222]: audit 2026-04-16T19:21:40.813954+0000 mon.vm01 (mon.0) 208 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:41 vm01 bash[28222]: audit 2026-04-16T19:21:40.816774+0000 mon.vm01 (mon.0) 209 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:41 vm01 bash[28222]: audit 2026-04-16T19:21:40.817500+0000 mon.vm01 (mon.0) 210 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch
2026-04-16T19:21:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:41 vm01 bash[28222]: audit 2026-04-16T19:21:41.625174+0000 mon.vm01 (mon.0) 211 : audit [DBG] from='client.? 192.168.123.104:0/1544176457' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:42.718 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:42.718 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json 2026-04-16T19:21:42.988 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-16T19:21:42.988 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-16T19:21:43.412 INFO:teuthology.orchestra.run.vm04.stdout: 2026-04-16T19:21:43.412 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-16T19:21:43.413 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1 2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.062189+0000 mon.vm01 (mon.0) 212 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.062189+0000 mon.vm01 (mon.0) 212 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.064947+0000 mon.vm01 (mon.0) 213 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.064947+0000 mon.vm01 (mon.0) 213 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.411714+0000 mon.vm01 (mon.0) 214 : audit [DBG] from='client.? 192.168.123.104:0/2957565297' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.411714+0000 mon.vm01 (mon.0) 214 : audit [DBG] from='client.? 
192.168.123.104:0/2957565297' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.726259+0000 mon.vm01 (mon.0) 215 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.728869+0000 mon.vm01 (mon.0) 216 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.729675+0000 mon.vm01 (mon.0) 217 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.730346+0000 mon.vm01 (mon.0) 218 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.730769+0000 mon.vm01 (mon.0) 219 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.731378+0000 mgr.vm01.nwhpas (mgr.14227) 7 : cephadm [INF] Updating vm01:/etc/ceph/ceph.conf
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.731511+0000 mgr.vm01.nwhpas (mgr.14227) 8 : cephadm [INF] Updating vm04:/etc/ceph/ceph.conf
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.770383+0000 mgr.vm01.nwhpas (mgr.14227) 9 : cephadm [INF] Updating vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.773643+0000 mgr.vm01.nwhpas (mgr.14227) 10 : cephadm [INF] Updating vm04:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.886173+0000 mon.vm01 (mon.0) 220 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.889007+0000 mon.vm01 (mon.0) 221 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.899910+0000 mon.vm01 (mon.0) 222 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.902515+0000 mon.vm01 (mon.0) 223 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.905042+0000 mon.vm01 (mon.0) 224 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.905890+0000 mon.vm01 (mon.0) 225 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.907096+0000 mon.vm01 (mon.0) 226 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-16T19:21:44.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:44 vm01 bash[28222]: audit 2026-04-16T19:21:43.908403+0000 mon.vm01 (mon.0) 227 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:44.508 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
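The "Waiting for 2 mons in monmap..." lines come from the cephadm task polling the cluster: it shells into the bootstrap container and re-runs `ceph mon dump -f json` until the monmap lists both monitors. A minimal sketch of that loop in Python, using the image, fsid, and paths shown in the log; the helper name, timeout, and interval are illustrative, not teuthology's actual code:

    import json
    import subprocess
    import time

    # Values taken from the command lines in this log; the rest is a sketch.
    IMAGE = 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7'
    FSID = '3711bb6a-39c9-11f1-9688-8928648d55a6'
    CEPHADM = ['sudo', '/home/ubuntu/cephtest/cephadm', '--image', IMAGE,
               'shell', '-c', '/etc/ceph/ceph.conf',
               '-k', '/etc/ceph/ceph.client.admin.keyring',
               '--fsid', FSID, '--']

    def wait_for_mons(want, timeout=300, interval=2):
        # Re-run `ceph mon dump -f json` until the monmap lists `want` mons.
        monmap = {'mons': []}
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(
                CEPHADM + ['ceph', 'mon', 'dump', '-f', 'json'])
            monmap = json.loads(out)
            if len(monmap['mons']) >= want:
                return monmap
            time.sleep(interval)
        raise TimeoutError('monmap still has %d mons' % len(monmap['mons']))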
2026-04-16T19:21:44.508 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:44.793 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:45.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:45 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.808814+0000 mgr.vm01.nwhpas (mgr.14227) 11 : cephadm [INF] Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-04-16T19:21:45.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:45 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.818538+0000 mgr.vm01.nwhpas (mgr.14227) 12 : cephadm [INF] Updating vm04:/etc/ceph/ceph.client.admin.keyring
2026-04-16T19:21:45.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:45 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.844697+0000 mgr.vm01.nwhpas (mgr.14227) 13 : cephadm [INF] Updating vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.client.admin.keyring
2026-04-16T19:21:45.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:45 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.860533+0000 mgr.vm01.nwhpas (mgr.14227) 14 : cephadm [INF] Updating vm04:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.client.admin.keyring
2026-04-16T19:21:45.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:45 vm01 bash[28222]: cephadm 2026-04-16T19:21:43.908899+0000 mgr.vm01.nwhpas (mgr.14227) 15 : cephadm [INF] Deploying daemon ceph-exporter.vm04 on vm04
2026-04-16T19:21:45.469 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:45.469 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:45.469 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.468782+0000 mon.vm01 (mon.0) 228 : audit [DBG] from='client.? 192.168.123.104:0/1396538554' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.551415+0000 mon.vm01 (mon.0) 229 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.555833+0000 mon.vm01 (mon.0) 230 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.559855+0000 mon.vm01 (mon.0) 231 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.563565+0000 mon.vm01 (mon.0) 232 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.564663+0000 mon.vm01 (mon.0) 233 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-16T19:21:46.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.565859+0000 mon.vm01 (mon.0) 234 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-04-16T19:21:46.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: audit 2026-04-16T19:21:45.567334+0000 mon.vm01 (mon.0) 235 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:46.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:46 vm01 bash[28222]: cephadm 2026-04-16T19:21:45.567978+0000 mgr.vm01.nwhpas (mgr.14227) 16 : cephadm [INF] Deploying daemon crash.vm04 on vm04
2026-04-16T19:21:46.591 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
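The `mon dump` output above (epoch 1, a single mon, quorum [0]) carries every field the wait loop inspects. A short sketch of summarizing one dump, assuming the raw JSON text is passed in (for example, the return of the hypothetical `wait_for_mons` above before decoding):

    import json

    def summarize_monmap(out):
        # `out` is the raw `ceph mon dump -f json` text, as dumped above.
        monmap = json.loads(out)
        print('epoch %d fsid %s' % (monmap['epoch'], monmap['fsid']))
        for mon in monmap['mons']:
            # each mon advertises a v2 (:3300) and a v1 (:6789) address
            v2 = next(a['addr'] for a in mon['public_addrs']['addrvec']
                      if a['type'] == 'v2')
            print('rank %d mon.%s v2:%s' % (mon['rank'], mon['name'], v2))
        print('quorum ranks: %s' % monmap['quorum'])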
2026-04-16T19:21:46.591 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:46.880 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:47.647 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:47.647 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":1,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:20:05.536485Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-16T19:21:47.647 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 1
2026-04-16T19:21:47.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:46.453388+0000 mon.vm01 (mon.0) 236 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:46.456064+0000 mon.vm01 (mon.0) 237 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:46.458387+0000 mon.vm01 (mon.0) 238 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:46.460731+0000 mon.vm01 (mon.0) 239 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: cephadm 2026-04-16T19:21:46.461655+0000 mgr.vm01.nwhpas (mgr.14227) 17 : cephadm [INF] Deploying daemon node-exporter.vm04 on vm04
2026-04-16T19:21:47.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.296717+0000 mon.vm01 (mon.0) 240 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.302006+0000 mon.vm01 (mon.0) 241 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.306500+0000 mon.vm01 (mon.0) 242 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.309840+0000 mon.vm01 (mon.0) 243 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.311938+0000 mon.vm01 (mon.0) 244 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm04.ztqrcx", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.313677+0000 mon.vm01 (mon.0) 245 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm04.ztqrcx", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.316738+0000 mon.vm01 (mon.0) 246 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr services"} : dispatch
2026-04-16T19:21:47.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:47 vm01 bash[28222]: audit 2026-04-16T19:21:47.317600+0000 mon.vm01 (mon.0) 247 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:48.761 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-16T19:21:48.762 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mon dump -f json
2026-04-16T19:21:48.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: cephadm 2026-04-16T19:21:47.318450+0000 mgr.vm01.nwhpas (mgr.14227) 18 : cephadm [INF] Deploying daemon mgr.vm04.ztqrcx on vm04
2026-04-16T19:21:48.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:47.541767+0000 mon.vm01 (mon.0) 248 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:48.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:47.646519+0000 mon.vm01 (mon.0) 249 : audit [DBG] from='client.? 192.168.123.104:0/4125974359' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:48.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:48.283807+0000 mon.vm01 (mon.0) 250 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:48.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:48.286349+0000 mon.vm01 (mon.0) 251 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:48.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:48.288658+0000 mon.vm01 (mon.0) 252 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:48.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:48.290824+0000 mon.vm01 (mon.0) 253 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:48.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:48.291916+0000 mon.vm01 (mon.0) 254 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-16T19:21:48.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:48 vm01 bash[28222]: audit 2026-04-16T19:21:48.292413+0000 mon.vm01 (mon.0) 255 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:49.115 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm04/config
2026-04-16T19:21:49.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:49 vm04 bash[34817]: debug 2026-04-16T19:21:49.846+0000 7f4c8a10f640 1 mon.vm04@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3
2026-04-16T19:21:49.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:49 vm01 bash[28222]: cephadm 2026-04-16T19:21:48.292988+0000 mgr.vm01.nwhpas (mgr.14227) 19 : cephadm [INF] Deploying daemon mon.vm04 on vm04
2026-04-16T19:21:49.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:49 vm01 bash[28222]: audit 2026-04-16T19:21:49.535543+0000 mon.vm01 (mon.0) 256 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:49.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:49 vm01 bash[28222]: audit 2026-04-16T19:21:49.540178+0000 mon.vm01 (mon.0) 257 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:54.874 INFO:teuthology.orchestra.run.vm04.stdout:
2026-04-16T19:21:54.874 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":2,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","modified":"2026-04-16T19:21:49.856939Z","created":"2026-04-16T19:20:05.536485Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm04","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]}
2026-04-16T19:21:54.874 INFO:teuthology.orchestra.run.vm04.stderr:dumped monmap epoch 2
2026-04-16T19:21:54.956 INFO:tasks.cephadm:Generating final ceph.conf file...
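With the dump above now showing epoch 2, both mons, and quorum ranks 0 and 1, the wait ends and the task asks a monitor for a minimal client config. A one-step sketch, reusing the hypothetical `CEPHADM` command prefix from the polling sketch earlier:

    import subprocess

    # Capture the minimal conf the mon generates (illustrative only;
    # CEPHADM is the assumed command prefix defined in the earlier sketch).
    minimal_conf = subprocess.check_output(
        CEPHADM + ['ceph', 'config', 'generate-minimal-conf']).decode()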
2026-04-16T19:21:54.956 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph config generate-minimal-conf
2026-04-16T19:21:55.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:49.860606+0000 mon.vm01 (mon.0) 263 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch
2026-04-16T19:21:55.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:49.860776+0000 mon.vm01 (mon.0) 264 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:49.860922+0000 mon.vm01 (mon.0) 265 : cluster [INF] mon.vm01 calling monitor election
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:50.002342+0000 mon.vm01 (mon.0) 266 : audit [DBG] from='client.? 192.168.123.104:0/3894697179' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:50.856513+0000 mon.vm01 (mon.0) 267 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:51.856653+0000 mon.vm01 (mon.0) 268 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:51.857522+0000 mon.vm04 (mon.1) 1 : cluster [INF] mon.vm04 calling monitor election
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:52.557931+0000 mon.vm01 (mon.0) 269 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:52.857279+0000 mon.vm01 (mon.0) 270 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:53.856701+0000 mon.vm01 (mon.0) 271 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:54.857204+0000 mon.vm01 (mon.0) 272 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.867447+0000 mon.vm01 (mon.0) 273 : cluster [INF] mon.vm01 is new leader, mons vm01,vm04 in quorum (ranks 0,1)
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872362+0000 mon.vm01 (mon.0) 274 : cluster [DBG] monmap epoch 2
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872400+0000 mon.vm01 (mon.0) 275 : cluster [DBG] fsid 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872414+0000 mon.vm01 (mon.0) 276 : cluster [DBG] last_changed 2026-04-16T19:21:49.856939+0000
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872429+0000 mon.vm01 (mon.0) 277 : cluster [DBG] created 2026-04-16T19:20:05.536485+0000
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872443+0000 mon.vm01 (mon.0) 278 : cluster [DBG] min_mon_release 20 (tentacle)
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872457+0000 mon.vm01 (mon.0) 279 : cluster [DBG] election_strategy: 1
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872471+0000 mon.vm01 (mon.0) 280 : cluster [DBG] 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.872484+0000 mon.vm01 (mon.0) 281 : cluster [DBG] 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.vm04
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.873016+0000 mon.vm01 (mon.0) 282 : cluster [DBG] fsmap
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.873047+0000 mon.vm01 (mon.0) 283 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.873234+0000 mon.vm01 (mon.0) 284 : cluster [DBG] mgrmap e17: vm01.nwhpas(active, since 17s)
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: cluster 2026-04-16T19:21:54.873366+0000 mon.vm01 (mon.0) 285 : cluster [INF] overall HEALTH_OK
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:54.880170+0000 mon.vm01 (mon.0) 286 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:54.886315+0000 mon.vm01 (mon.0) 287 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:54.892240+0000 mon.vm01 (mon.0) 288 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:54.893285+0000 mon.vm01 (mon.0) 289 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:55.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:54 vm04 bash[34817]: audit 2026-04-16T19:21:54.893954+0000 mon.vm01 (mon.0) 290 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:49.860606+0000 mon.vm01 (mon.0) 263 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:49.860776+0000 mon.vm01 (mon.0) 264 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:49.860922+0000 mon.vm01 (mon.0) 265 : cluster [INF] mon.vm01 calling monitor election
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:50.002342+0000 mon.vm01 (mon.0) 266 : audit [DBG] from='client.? 192.168.123.104:0/3894697179' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:50.856513+0000 mon.vm01 (mon.0) 267 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:51.856653+0000 mon.vm01 (mon.0) 268 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:51.857522+0000 mon.vm04 (mon.1) 1 : cluster [INF] mon.vm04 calling monitor election
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:52.557931+0000 mon.vm01 (mon.0) 269 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:52.857279+0000 mon.vm01 (mon.0) 270 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:53.856701+0000 mon.vm01 (mon.0) 271 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:54.857204+0000 mon.vm01 (mon.0) 272 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.867447+0000 mon.vm01 (mon.0) 273 : cluster [INF] mon.vm01 is new leader, mons vm01,vm04 in quorum (ranks 0,1)
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872362+0000 mon.vm01 (mon.0) 274 : cluster [DBG] monmap epoch 2
2026-04-16T19:21:55.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872400+0000 mon.vm01 (mon.0) 275 : cluster [DBG] fsid 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872414+0000 mon.vm01 (mon.0) 276 : cluster [DBG] last_changed 2026-04-16T19:21:49.856939+0000
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872429+0000 mon.vm01 (mon.0) 277 : cluster [DBG] created 2026-04-16T19:20:05.536485+0000
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872443+0000 mon.vm01 (mon.0) 278 : cluster [DBG] min_mon_release 20 (tentacle)
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872457+0000 mon.vm01 (mon.0) 279 : cluster [DBG] election_strategy: 1
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872471+0000 mon.vm01 (mon.0) 280 : cluster [DBG] 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.872484+0000 mon.vm01 (mon.0) 281 : cluster [DBG] 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.vm04
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.873016+0000 mon.vm01 (mon.0) 282 : cluster [DBG] fsmap
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.873047+0000 mon.vm01 (mon.0) 283 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.873234+0000 mon.vm01 (mon.0) 284 : cluster [DBG] mgrmap e17: vm01.nwhpas(active, since 17s)
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: cluster 2026-04-16T19:21:54.873366+0000 mon.vm01 (mon.0) 285 : cluster [INF] overall HEALTH_OK
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:54.880170+0000 mon.vm01 (mon.0) 286 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:54.886315+0000 mon.vm01 (mon.0) 287 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:54.892240+0000 mon.vm01 (mon.0) 288 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:54.893285+0000 mon.vm01 (mon.0) 289 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:55.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:54 vm01 bash[28222]: audit 2026-04-16T19:21:54.893954+0000 mon.vm01 (mon.0) 290 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:21:55.251 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:55.718 INFO:teuthology.orchestra.run.vm01.stdout:# minimal ceph.conf for 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:21:55.718 INFO:teuthology.orchestra.run.vm01.stdout:[global]
2026-04-16T19:21:55.718 INFO:teuthology.orchestra.run.vm01.stdout:	fsid = 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:21:55.718 INFO:teuthology.orchestra.run.vm01.stdout:	mon_host = [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0]
2026-04-16T19:21:55.797 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
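The minimal ceph.conf printed above is ordinary INI: a [global] section holding just the fsid and a space-separated mon_host list of bracketed v2/v1 address pairs. A sketch of reading it back with the Python standard library (the function name is illustrative; `text` would be the conf captured in the previous sketch):

    import configparser

    def parse_minimal_conf(text):
        # `text` is the minimal ceph.conf shown above.
        cp = configparser.ConfigParser()
        cp.read_string(text)
        fsid = cp['global']['fsid']
        # mon_host is a space-separated list of [v2:...,v1:...] pairs
        mon_addrs = cp['global']['mon_host'].split()
        return fsid, mon_addrs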
2026-04-16T19:21:55.797 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:21:55.797 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.conf
2026-04-16T19:21:55.809 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:21:55.809 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-16T19:21:55.866 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-16T19:21:55.867 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.conf
2026-04-16T19:21:55.877 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-16T19:21:55.877 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-16T19:21:55.930 DEBUG:tasks.cephadm:set 0 configs
2026-04-16T19:21:55.930 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph config dump
2026-04-16T19:21:56.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:55 vm04 bash[34817]: cephadm 2026-04-16T19:21:54.894889+0000 mgr.vm01.nwhpas (mgr.14227) 20 : cephadm [INF] Updating vm01:/etc/ceph/ceph.conf
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:55 vm04 bash[34817]: cephadm 2026-04-16T19:21:54.895171+0000 mgr.vm01.nwhpas (mgr.14227) 21 : cephadm [INF] Updating vm04:/etc/ceph/ceph.conf
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:55 vm04 bash[34817]: cephadm 2026-04-16T19:21:54.948721+0000 mgr.vm01.nwhpas (mgr.14227) 22 : cephadm [INF] Updating vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:55 vm04 bash[34817]: cephadm 2026-04-16T19:21:54.950372+0000 mgr.vm01.nwhpas (mgr.14227) 23 : cephadm [INF] Updating vm04:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:54.994474+0000 mon.vm01 (mon.0) 291 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
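
The bare `sudo dd of=...` commands above read the file bytes from stdin of the SSH channel; that is how teuthology streams the conf and keyring to each node. A rough hand-rolled equivalent, with local file names assumed for illustration:

    # Stream local copies of the config and admin keyring to a remote node as root
    ssh vm01 'sudo dd of=/etc/ceph/ceph.conf' < ceph.conf
    ssh vm01 'sudo dd of=/etc/ceph/ceph.client.admin.keyring' < ceph.client.admin.keyring
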
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.000251+0000 mon.vm01 (mon.0) 292 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.004122+0000 mon.vm01 (mon.0) 293 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.008182+0000 mon.vm01 (mon.0) 294 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.011898+0000 mon.vm01 (mon.0) 295 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: cephadm 2026-04-16T19:21:55.026035+0000 mgr.vm01.nwhpas (mgr.14227) 24 : cephadm [INF] Reconfiguring mon.vm01 (unknown last config time)...
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.026351+0000 mon.vm01 (mon.0) 296 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.026899+0000 mon.vm01 (mon.0) 297 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.027305+0000 mon.vm01 (mon.0) 298 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: cephadm 2026-04-16T19:21:55.027803+0000 mgr.vm01.nwhpas (mgr.14227) 25 : cephadm [INF] Reconfiguring daemon mon.vm01 on vm01
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.533921+0000 mon.vm01 (mon.0) 299 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.538810+0000 mon.vm01 (mon.0) 300 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: cephadm 2026-04-16T19:21:55.539712+0000 mgr.vm01.nwhpas (mgr.14227) 26 : cephadm [INF] Reconfiguring crash.vm01 (monmap changed)...
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.539930+0000 mon.vm01 (mon.0) 301 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.540624+0000 mon.vm01 (mon.0) 302 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: cephadm 2026-04-16T19:21:55.541163+0000 mgr.vm01.nwhpas (mgr.14227) 27 : cephadm [INF] Reconfiguring daemon crash.vm01 on vm01
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.718077+0000 mon.vm01 (mon.0) 303 : audit [DBG] from='client.? 192.168.123.101:0/2119505387' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:56.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:56 vm04 bash[34817]: audit 2026-04-16T19:21:55.857077+0000 mon.vm01 (mon.0) 304 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:54.894889+0000 mgr.vm01.nwhpas (mgr.14227) 20 : cephadm [INF] Updating vm01:/etc/ceph/ceph.conf
2026-04-16T19:21:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:54.895171+0000 mgr.vm01.nwhpas (mgr.14227) 21 : cephadm [INF] Updating vm04:/etc/ceph/ceph.conf
2026-04-16T19:21:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:54.948721+0000 mgr.vm01.nwhpas (mgr.14227) 22 : cephadm [INF] Updating vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:54.950372+0000 mgr.vm01.nwhpas (mgr.14227) 23 : cephadm [INF] Updating vm04:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/config/ceph.conf
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:54.994474+0000 mon.vm01 (mon.0) 291 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.000251+0000 mon.vm01 (mon.0) 292 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.004122+0000 mon.vm01 (mon.0) 293 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.008182+0000 mon.vm01 (mon.0) 294 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.011898+0000 mon.vm01 (mon.0) 295 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:55.026035+0000 mgr.vm01.nwhpas (mgr.14227) 24 : cephadm [INF] Reconfiguring mon.vm01 (unknown last config time)...
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.026351+0000 mon.vm01 (mon.0) 296 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.026899+0000 mon.vm01 (mon.0) 297 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.027305+0000 mon.vm01 (mon.0) 298 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:55.027803+0000 mgr.vm01.nwhpas (mgr.14227) 25 : cephadm [INF] Reconfiguring daemon mon.vm01 on vm01
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.533921+0000 mon.vm01 (mon.0) 299 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.538810+0000 mon.vm01 (mon.0) 300 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:55.539712+0000 mgr.vm01.nwhpas (mgr.14227) 26 : cephadm [INF] Reconfiguring crash.vm01 (monmap changed)...
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.539930+0000 mon.vm01 (mon.0) 301 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.540624+0000 mon.vm01 (mon.0) 302 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: cephadm 2026-04-16T19:21:55.541163+0000 mgr.vm01.nwhpas (mgr.14227) 27 : cephadm [INF] Reconfiguring daemon crash.vm01 on vm01
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.718077+0000 mon.vm01 (mon.0) 303 : audit [DBG] from='client.? 192.168.123.101:0/2119505387' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:55 vm01 bash[28222]: audit 2026-04-16T19:21:55.857077+0000 mon.vm01 (mon.0) 304 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch
2026-04-16T19:21:56.252 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:WHO MASK LEVEL OPTION VALUE RO
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global dev auth_debug true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global basic container_image harbor.clyso.com/custom-ceph/ceph/ceph@sha256:fc41d50a3963b1f80069c41228698fd9d125d9478206ca50fa68bb35558d951e *
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global dev debug_asserts_on_shutdown true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global basic log_to_file true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global basic log_to_journald false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global basic log_to_stderr false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_allow_pool_delete true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_clock_drift_allowed 1.000000
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_cluster_log_to_file true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_max_pg_per_osd 10000
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_pg_warn_max_object_skew 0.000000
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_warn_on_crush_straw_calc_version_zero false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_warn_on_legacy_crush_tunables false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_warn_on_osd_down_out_interval_zero false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global dev mon_warn_on_pool_pg_num_not_power_of_two false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced mon_warn_on_too_few_osds false
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global dev ms_die_on_bug true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global dev ms_die_on_old_message true
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced osd_pool_default_erasure_code_profile plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced osd_pool_default_pg_autoscale_mode off
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:global advanced public_network 192.168.123.0/24,192.168.123.1/32 *
2026-04-16T19:21:56.708 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced auth_allow_insecure_global_id_reclaim false
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced auth_mon_ticket_ttl 660.000000
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced auth_service_ticket_ttl 240.000000
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced debug_mon 20/20
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced debug_ms 1/1
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced debug_paxos 20/20
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_data_avail_warn 5
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_mgr_mkfs_grace 240
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon dev mon_osd_prime_pg_temp true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_osd_reporter_subtree_level osd
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_reweight_min_bytes_per_osd 10
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_reweight_min_pgs_per_osd 4
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim false
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim_allowed false
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced debug_mgr 20/20
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced debug_ms 1/1
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mgr/cephadm/allow_ptrace true *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mgr/cephadm/container_init True *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mgr/cephadm/migration_current 7 *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mgr/dashboard/GRAFANA_API_SSL_VERIFY false *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mgr/dashboard/ssl_server_port 8443 *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mgr/orchestrator/orchestrator cephadm
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mon_reweight_min_bytes_per_osd 10
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:mgr advanced mon_reweight_min_pgs_per_osd 4
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev bdev_debug_aio true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced debug_ms 1/1
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced debug_osd 20/20
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_misdirected_ops true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_op_order true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_pg_log_writeout true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_shutdown true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_verify_cached_snaps true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_verify_missing_on_start true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd dev osd_debug_verify_stray_on_activate true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_deep_scrub_update_digest_min_age 30
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd basic osd_mclock_iops_capacity_threshold_hdd 49000.000000
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_mclock_profile high_recovery_ops
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_memory_target_autotune true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_op_queue debug_random *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_op_queue_cut_off debug_random *
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_recover_clone_overlap true
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_recovery_max_chunk 1048576
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_scrub_load_threshold 5.000000
2026-04-16T19:21:56.709 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_scrub_max_interval 600.000000
2026-04-16T19:21:56.710 INFO:teuthology.orchestra.run.vm01.stdout:osd advanced osd_shutdown_pgref_assert true
2026-04-16T19:21:56.710 INFO:teuthology.orchestra.run.vm01.stdout:client.rgw advanced rgw_cache_enabled true
2026-04-16T19:21:56.710 INFO:teuthology.orchestra.run.vm01.stdout:client.rgw advanced rgw_enable_ops_log true
2026-04-16T19:21:56.710 INFO:teuthology.orchestra.run.vm01.stdout:client.rgw advanced rgw_enable_usage_log true
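
The dump above is the whole cluster configuration database as set up for this run. To read back a single value rather than the full table, `ceph config get` takes the same WHO and OPTION columns, e.g. (a sketch, run inside the same cephadm shell):

    # Query individual options instead of dumping the whole table
    ceph config get mon public_network
    ceph config get osd osd_mclock_profile
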
2026-04-16T19:21:56.815 INFO:tasks.cephadm:Deploying OSDs...
2026-04-16T19:21:56.815 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:21:56.815 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout
2026-04-16T19:21:56.819 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1']
2026-04-16T19:21:56.819 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme0n1
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme0n1
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 896 Links: 1 Device type: 103,1
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:19:16.010599352 +0000
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:19:15.966599352 +0000
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:19:15.966599352 +0000
2026-04-16T19:21:56.867 INFO:teuthology.orchestra.run.vm01.stdout: Birth: -
2026-04-16T19:21:56.867 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1
2026-04-16T19:21:56.920 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-16T19:21:56.920 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-16T19:21:56.920 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000274734 s, 1.9 MB/s
2026-04-16T19:21:56.920 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1
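
Each scratch device gets the same three-step probe before it is handed to the orchestrator: stat to confirm the block node exists, a one-sector dd read to confirm it is readable, and a mount-table check to confirm nothing other than devtmpfs references it. Condensed into a loop, the probe is roughly:

    # Probe each candidate device: present, readable, and not mounted
    for dev in /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1; do
        stat "$dev"
        sudo dd if="$dev" of=/dev/null count=1
        ! mount | grep -v devtmpfs | grep -q "$dev"
    done
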
2026-04-16T19:21:56.968 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme1n1
2026-04-16T19:21:57.015 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme1n1
2026-04-16T19:21:57.015 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-16T19:21:57.016 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 906 Links: 1 Device type: 103,3
2026-04-16T19:21:57.016 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-16T19:21:57.016 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:19:16.322599352 +0000
2026-04-16T19:21:57.016 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:19:16.278599352 +0000
2026-04-16T19:21:57.016 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:19:16.278599352 +0000
2026-04-16T19:21:57.016 INFO:teuthology.orchestra.run.vm01.stdout: Birth: -
2026-04-16T19:21:57.016 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1
2026-04-16T19:21:57.068 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-16T19:21:57.068 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-16T19:21:57.068 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000220392 s, 2.3 MB/s
2026-04-16T19:21:57.069 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1
2026-04-16T19:21:57.112 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.082487+0000 mon.vm01 (mon.0) 305 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.112 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.086970+0000 mon.vm01 (mon.0) 306 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.112 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: cephadm 2026-04-16T19:21:56.087694+0000 mgr.vm01.nwhpas (mgr.14227) 28 : cephadm [INF] Reconfiguring ceph-exporter.vm01 (monmap changed)...
2026-04-16T19:21:57.112 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.087936+0000 mon.vm01 (mon.0) 307 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.088650+0000 mon.vm01 (mon.0) 308 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: cephadm 2026-04-16T19:21:56.089252+0000 mgr.vm01.nwhpas (mgr.14227) 29 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm01 on vm01
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.693737+0000 mon.vm01 (mon.0) 309 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.699380+0000 mon.vm01 (mon.0) 310 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: cephadm 2026-04-16T19:21:56.700117+0000 mgr.vm01.nwhpas (mgr.14227) 30 : cephadm [INF] Reconfiguring mgr.vm01.nwhpas (unknown last config time)...
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.700345+0000 mon.vm01 (mon.0) 311 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm01.nwhpas", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.701098+0000 mon.vm01 (mon.0) 312 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr services"} : dispatch
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.701547+0000 mon.vm01 (mon.0) 313 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: cephadm 2026-04-16T19:21:56.702225+0000 mgr.vm01.nwhpas (mgr.14227) 31 : cephadm [INF] Reconfiguring daemon mgr.vm01.nwhpas on vm01
2026-04-16T19:21:57.113 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:57 vm01 bash[28222]: audit 2026-04-16T19:21:56.707289+0000 mon.vm01 (mon.0) 314 : audit [DBG] from='client.? 192.168.123.101:0/1020114993' entity='client.admin' cmd={"prefix": "config dump"} : dispatch
2026-04-16T19:21:57.116 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme2n1
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme2n1
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 916 Links: 1 Device type: 103,5
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:19:16.630599352 +0000
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:19:16.586599352 +0000
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:19:16.586599352 +0000
2026-04-16T19:21:57.167 INFO:teuthology.orchestra.run.vm01.stdout: Birth: -
2026-04-16T19:21:57.167 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1
2026-04-16T19:21:57.225 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-16T19:21:57.225 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-16T19:21:57.225 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000324797 s, 1.6 MB/s
2026-04-16T19:21:57.226 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1
2026-04-16T19:21:57.272 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme3n1
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme3n1
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout:Device: 5h/5d Inode: 926 Links: 1 Device type: 103,7
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-16 19:19:16.942599352 +0000
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-16 19:19:16.894599352 +0000
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-16 19:19:16.894599352 +0000
2026-04-16T19:21:57.319 INFO:teuthology.orchestra.run.vm01.stdout: Birth: -
2026-04-16T19:21:57.319 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1
2026-04-16T19:21:57.372 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-16T19:21:57.373 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-16T19:21:57.373 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000247923 s, 2.1 MB/s
2026-04-16T19:21:57.373 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme3n1
2026-04-16T19:21:57.420 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-16T19:21:57.420 DEBUG:teuthology.orchestra.run.vm04:> dd if=/scratch_devs of=/dev/stdout
2026-04-16T19:21:57.424 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1']
2026-04-16T19:21:57.424 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme0n1
2026-04-16T19:21:57.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.082487+0000 mon.vm01 (mon.0) 305 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.086970+0000 mon.vm01 (mon.0) 306 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: cephadm 2026-04-16T19:21:56.087694+0000 mgr.vm01.nwhpas (mgr.14227) 28 : cephadm [INF] Reconfiguring ceph-exporter.vm01 (monmap changed)...
2026-04-16T19:21:57.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.087936+0000 mon.vm01 (mon.0) 307 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.088650+0000 mon.vm01 (mon.0) 308 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: cephadm 2026-04-16T19:21:56.089252+0000 mgr.vm01.nwhpas (mgr.14227) 29 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm01 on vm01
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.693737+0000 mon.vm01 (mon.0) 309 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.699380+0000 mon.vm01 (mon.0) 310 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: cephadm 2026-04-16T19:21:56.700117+0000 mgr.vm01.nwhpas (mgr.14227) 30 : cephadm [INF] Reconfiguring mgr.vm01.nwhpas (unknown last config time)...
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.700345+0000 mon.vm01 (mon.0) 311 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm01.nwhpas", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.701098+0000 mon.vm01 (mon.0) 312 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr services"} : dispatch
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.701547+0000 mon.vm01 (mon.0) 313 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: cephadm 2026-04-16T19:21:56.702225+0000 mgr.vm01.nwhpas (mgr.14227) 31 : cephadm [INF] Reconfiguring daemon mgr.vm01.nwhpas on vm01
2026-04-16T19:21:57.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:57 vm04 bash[34817]: audit 2026-04-16T19:21:56.707289+0000 mon.vm01 (mon.0) 314 : audit [DBG] from='client.? 192.168.123.101:0/1020114993' entity='client.admin' cmd={"prefix": "config dump"} : dispatch
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme0n1
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 885 Links: 1 Device type: 103,1
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:19:38.926251966 +0000
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:19:38.878251966 +0000
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:19:38.878251966 +0000
2026-04-16T19:21:57.461 INFO:teuthology.orchestra.run.vm04.stdout: Birth: -
2026-04-16T19:21:57.461 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1
2026-04-16T19:21:57.510 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-04-16T19:21:57.510 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-04-16T19:21:57.510 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000255017 s, 2.0 MB/s
2026-04-16T19:21:57.510 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1
2026-04-16T19:21:57.558 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme1n1
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme1n1
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 895 Links: 1 Device type: 103,3
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:19:39.262251966 +0000
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:19:39.218251966 +0000
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:19:39.218251966 +0000
2026-04-16T19:21:57.605 INFO:teuthology.orchestra.run.vm04.stdout: Birth: -
2026-04-16T19:21:57.605 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1
2026-04-16T19:21:57.653 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-04-16T19:21:57.653 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-04-16T19:21:57.653 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000231162 s, 2.2 MB/s
2026-04-16T19:21:57.654 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1
mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-04-16T19:21:57.702 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme2n1 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme2n1 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 905 Links: 1 Device type: 103,5 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:19:39.586251966 +0000 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:19:39.542251966 +0000 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:19:39.542251966 +0000 2026-04-16T19:21:57.749 INFO:teuthology.orchestra.run.vm04.stdout: Birth: - 2026-04-16T19:21:57.749 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-04-16T19:21:57.797 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-16T19:21:57.797 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-16T19:21:57.797 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000210202 s, 2.4 MB/s 2026-04-16T19:21:57.798 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-04-16T19:21:57.846 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/nvme3n1 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/nvme3n1 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout:Device: 5h/5d Inode: 915 Links: 1 Device type: 103,7 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-04-16 19:19:39.922251966 +0000 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-04-16 19:19:39.878251966 +0000 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-04-16 19:19:39.878251966 +0000 2026-04-16T19:21:57.893 INFO:teuthology.orchestra.run.vm04.stdout: Birth: - 2026-04-16T19:21:57.893 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-04-16T19:21:57.941 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-04-16T19:21:57.941 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-04-16T19:21:57.941 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000238165 s, 2.1 MB/s 2026-04-16T19:21:57.942 DEBUG:teuthology.orchestra.run.vm04:> ! 
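Before OSDs are deployed, each loop-backed NVMe namespace is probed three ways, as shown above: `stat` confirms the node is a block special file, a single-sector `dd` read confirms the device is actually readable, and a `mount | grep` check confirms nothing has it mounted. A minimal standalone sketch of the same probe (a hypothetical `probe_device` helper, not teuthology's actual code):

```python
import stat
import subprocess
from pathlib import Path

def probe_device(dev: str) -> None:
    """Mirror the checks in the log: block device, readable, not mounted."""
    mode = Path(dev).stat().st_mode
    if not stat.S_ISBLK(mode):
        raise RuntimeError(f"{dev} is not a block special file")
    # One-sector read, as in `sudo dd if=$dev of=/dev/null count=1`.
    subprocess.run(["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"],
                   check=True, capture_output=True)
    # Fail if the device shows up in `mount` output (ignoring devtmpfs),
    # matching `! mount | grep -v devtmpfs | grep -q $dev`.
    mounts = subprocess.run(["mount"], check=True, capture_output=True,
                            text=True).stdout
    for line in mounts.splitlines():
        if "devtmpfs" not in line and dev in line:
            raise RuntimeError(f"{dev} is mounted: {line}")

for n in range(4):  # nvme0n1 .. nvme3n1, as probed in the log above
    probe_device(f"/dev/nvme{n}n1")
```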
2026-04-16T19:21:57.990 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch apply osd --all-available-devices
2026-04-16T19:21:58.180 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: audit 2026-04-16T19:21:57.175534+0000 mon.vm01 (mon.0) 315 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.180 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: audit 2026-04-16T19:21:57.181411+0000 mon.vm01 (mon.0) 316 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.180 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: cephadm 2026-04-16T19:21:57.182200+0000 mgr.vm01.nwhpas (mgr.14227) 32 : cephadm [INF] Reconfiguring alertmanager.vm01 deps ['mgr.vm01.nwhpas', 'secure_monitoring_stack:False'] -> ['alertmanager.vm01', 'mgr.vm01.nwhpas', 'mgr.vm04.ztqrcx', 'secure_monitoring_stack:False'] (diff {'mgr.vm04.ztqrcx', 'alertmanager.vm01'})
2026-04-16T19:21:58.180 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: cephadm 2026-04-16T19:21:57.186409+0000 mgr.vm01.nwhpas (mgr.14227) 33 : cephadm [INF] Reconfiguring daemon alertmanager.vm01 on vm01
2026-04-16T19:21:58.180 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: cluster 2026-04-16T19:21:57.258200+0000 mgr.vm01.nwhpas (mgr.14227) 34 : cluster [DBG] pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:58.323 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm04/config
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: audit 2026-04-16T19:21:57.175534+0000 mon.vm01 (mon.0) 315 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
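The `ceph orch apply osd --all-available-devices` call above is shorthand for applying an OSD service spec named `osd.all-available-devices` that targets every eligible device on every host; the mgr later logs "Saving service osd.all-available-devices spec with placement *". A sketch of applying the equivalent explicit spec; the YAML follows the upstream cephadm documentation and is an assumption, not something taken from this run:

```python
import subprocess
import tempfile

# Equivalent OSD service spec (assumed from upstream cephadm docs; this run
# used the --all-available-devices shorthand instead of an explicit file).
SPEC = """\
service_type: osd
service_id: all-available-devices
placement:
  host_pattern: '*'
spec:
  data_devices:
    all: true
"""

with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write(SPEC)
    spec_path = f.name

# `ceph orch apply -i <spec.yaml>` applies the same service spec that the
# shorthand generates; assumes a `ceph` CLI with admin keyring on the host.
subprocess.run(["sudo", "ceph", "orch", "apply", "-i", spec_path], check=True)
```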
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: audit 2026-04-16T19:21:57.181411+0000 mon.vm01 (mon.0) 316 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: cephadm 2026-04-16T19:21:57.182200+0000 mgr.vm01.nwhpas (mgr.14227) 32 : cephadm [INF] Reconfiguring alertmanager.vm01 deps ['mgr.vm01.nwhpas', 'secure_monitoring_stack:False'] -> ['alertmanager.vm01', 'mgr.vm01.nwhpas', 'mgr.vm04.ztqrcx', 'secure_monitoring_stack:False'] (diff {'mgr.vm04.ztqrcx', 'alertmanager.vm01'})
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: cephadm 2026-04-16T19:21:57.186409+0000 mgr.vm01.nwhpas (mgr.14227) 33 : cephadm [INF] Reconfiguring daemon alertmanager.vm01 on vm01
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: cluster 2026-04-16T19:21:57.258200+0000 mgr.vm01.nwhpas (mgr.14227) 34 : cluster [DBG] pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: audit 2026-04-16T19:21:57.878680+0000 mon.vm01 (mon.0) 317 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: audit 2026-04-16T19:21:57.883786+0000 mon.vm01 (mon.0) 318 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.442 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:58 vm01 bash[28222]: audit 2026-04-16T19:21:58.110514+0000 mon.vm01 (mon.0) 319 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-16T19:21:58.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: audit 2026-04-16T19:21:57.878680+0000 mon.vm01 (mon.0) 317 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: audit 2026-04-16T19:21:57.883786+0000 mon.vm01 (mon.0) 318 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:58.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:58 vm04 bash[34817]: audit 2026-04-16T19:21:58.110514+0000 mon.vm01 (mon.0) 319 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-16T19:21:58.755 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled osd.all-available-devices update...
2026-04-16T19:21:58.859 INFO:tasks.cephadm:Waiting for 8 OSDs to come up...
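From here the cephadm task repeatedly runs `ceph osd stat -f json` inside a cephadm shell (the DEBUG lines that follow) until `num_osds` reaches 8, i.e. four loop-backed devices on each of the two hosts. A simplified sketch of such a polling loop, assuming a plain `ceph` CLI on the host rather than the cephadm shell wrapper used in this run:

```python
import json
import subprocess
import time

def wait_for_osds(expected: int, timeout: float = 600.0) -> None:
    """Poll `ceph osd stat -f json` until `num_osds` reaches `expected`."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        out = subprocess.run(
            ["sudo", "ceph", "osd", "stat", "-f", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        # Early polls can emit a blank line before any JSON document appears.
        stat = json.loads(out.strip() or "{}")
        if stat.get("num_osds", 0) >= expected:
            return
        time.sleep(2)
    raise TimeoutError(f"fewer than {expected} OSDs came up within {timeout}s")

wait_for_osds(8)  # 4 loop-backed NVMe namespaces per host x 2 hosts
```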
2026-04-16T19:21:58.859 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:21:59.170 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:21:59.752 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: cephadm 2026-04-16T19:21:57.884672+0000 mgr.vm01.nwhpas (mgr.14227) 35 : cephadm [INF] Reconfiguring grafana.vm01 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm01', 'secure_monitoring_stack:False'] (diff {'prometheus.vm01'})
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: audit 2026-04-16T19:21:58.111029+0000 mgr.vm01.nwhpas (mgr.14227) 36 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: cephadm 2026-04-16T19:21:58.114906+0000 mgr.vm01.nwhpas (mgr.14227) 37 : cephadm [INF] Reconfiguring daemon grafana.vm01 on vm01
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: audit 2026-04-16T19:21:58.743958+0000 mgr.vm01.nwhpas (mgr.14227) 38 : audit [DBG] from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: cephadm 2026-04-16T19:21:58.745104+0000 mgr.vm01.nwhpas (mgr.14227) 39 : cephadm [INF] Marking host: vm01 for OSDSpec preview refresh.
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: cephadm 2026-04-16T19:21:58.745126+0000 mgr.vm01.nwhpas (mgr.14227) 40 : cephadm [INF] Marking host: vm04 for OSDSpec preview refresh.
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: cephadm 2026-04-16T19:21:58.745299+0000 mgr.vm01.nwhpas (mgr.14227) 41 : cephadm [INF] Saving service osd.all-available-devices spec with placement *
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: audit 2026-04-16T19:21:58.753589+0000 mon.vm01 (mon.0) 320 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: audit 2026-04-16T19:21:59.055275+0000 mon.vm01 (mon.0) 321 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:59.818 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: audit 2026-04-16T19:21:59.066339+0000 mon.vm01 (mon.0) 322 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:21:59.819 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:21:59 vm01 bash[28222]: audit 2026-04-16T19:21:59.748891+0000 mon.vm01 (mon.0) 323 : audit [DBG] from='client.? 192.168.123.101:0/1284112441' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:21:59.895 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-04-16T19:22:00.147 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: cephadm 2026-04-16T19:21:57.884672+0000 mgr.vm01.nwhpas (mgr.14227) 35 : cephadm [INF] Reconfiguring grafana.vm01 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm01', 'secure_monitoring_stack:False'] (diff {'prometheus.vm01'})
2026-04-16T19:22:00.147 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: audit 2026-04-16T19:21:58.111029+0000 mgr.vm01.nwhpas (mgr.14227) 36 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: cephadm 2026-04-16T19:21:58.114906+0000 mgr.vm01.nwhpas (mgr.14227) 37 : cephadm [INF] Reconfiguring daemon grafana.vm01 on vm01
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: audit 2026-04-16T19:21:58.743958+0000 mgr.vm01.nwhpas (mgr.14227) 38 : audit [DBG] from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: cephadm 2026-04-16T19:21:58.745104+0000 mgr.vm01.nwhpas (mgr.14227) 39 : cephadm [INF] Marking host: vm01 for OSDSpec preview refresh.
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: cephadm 2026-04-16T19:21:58.745126+0000 mgr.vm01.nwhpas (mgr.14227) 40 : cephadm [INF] Marking host: vm04 for OSDSpec preview refresh.
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: cephadm 2026-04-16T19:21:58.745299+0000 mgr.vm01.nwhpas (mgr.14227) 41 : cephadm [INF] Saving service osd.all-available-devices spec with placement *
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: audit 2026-04-16T19:21:58.753589+0000 mon.vm01 (mon.0) 320 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: audit 2026-04-16T19:21:59.055275+0000 mon.vm01 (mon.0) 321 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: audit 2026-04-16T19:21:59.066339+0000 mon.vm01 (mon.0) 322 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:00.148 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:21:59 vm04 bash[34817]: audit 2026-04-16T19:21:59.748891+0000 mon.vm01 (mon.0) 323 : audit [DBG] from='client.? 192.168.123.101:0/1284112441' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:00.896 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: cephadm 2026-04-16T19:21:59.067467+0000 mgr.vm01.nwhpas (mgr.14227) 42 : cephadm [INF] Reconfiguring prometheus.vm01 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm04'})
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: cephadm 2026-04-16T19:21:59.248770+0000 mgr.vm01.nwhpas (mgr.14227) 43 : cephadm [INF] Reconfiguring daemon prometheus.vm01 on vm01
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: cluster 2026-04-16T19:21:59.258385+0000 mgr.vm01.nwhpas (mgr.14227) 44 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: cluster 2026-04-16T19:22:00.147844+0000 mon.vm01 (mon.0) 324 : cluster [DBG] Standby manager daemon vm04.ztqrcx started
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.148335+0000 mon.vm01 (mon.0) 325 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm04.ztqrcx/crt"} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.149094+0000 mon.vm01 (mon.0) 326 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.150447+0000 mon.vm01 (mon.0) 327 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm04.ztqrcx/key"} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.151880+0000 mon.vm01 (mon.0) 328 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.171314+0000 mon.vm01 (mon.0) 329 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.176730+0000 mon.vm01 (mon.0) 330 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.177888+0000 mon.vm01 (mon.0) 331 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.178579+0000 mon.vm01 (mon.0) 332 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.723604+0000 mon.vm01 (mon.0) 333 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.727986+0000 mon.vm01 (mon.0) 334 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
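The "Reconfiguring <daemon> deps [...] -> [...] (diff {...})" messages above show how cephadm decides to reconfigure monitoring daemons: each daemon's configuration is derived from a list of other daemons and settings, and the daemon is reconfigured whenever the recomputed list differs from the stored one. A toy illustration of that comparison, using the prometheus.vm01 values from the log (the real logic lives in the cephadm mgr module; this is just the set arithmetic the message reflects):

```python
# Stored vs. recomputed dependency lists for prometheus.vm01, as logged above.
old_deps = ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01',
            'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False']
new_deps = ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01',
            'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter',
            'secure_monitoring_stack:False']

# A non-empty symmetric difference triggers a reconfigure; here the newly
# deployed ceph-exporter on vm04 is the trigger.
diff = set(old_deps) ^ set(new_deps)
if diff:
    print(f"Reconfiguring prometheus.vm01 (diff {diff})")  # {'ceph-exporter.vm04'}
```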
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.728847+0000 mon.vm01 (mon.0) 335 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.729370+0000 mon.vm01 (mon.0) 336 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-16T19:22:01.067 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:00 vm04 bash[34817]: audit 2026-04-16T19:22:00.729879+0000 mon.vm01 (mon.0) 337 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:01.190 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:01.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: cephadm 2026-04-16T19:21:59.067467+0000 mgr.vm01.nwhpas (mgr.14227) 42 : cephadm [INF] Reconfiguring prometheus.vm01 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm04'})
2026-04-16T19:22:01.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: cephadm 2026-04-16T19:21:59.248770+0000 mgr.vm01.nwhpas (mgr.14227) 43 : cephadm [INF] Reconfiguring daemon prometheus.vm01 on vm01
2026-04-16T19:22:01.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: cluster 2026-04-16T19:21:59.258385+0000 mgr.vm01.nwhpas (mgr.14227) 44 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: cluster 2026-04-16T19:22:00.147844+0000 mon.vm01 (mon.0) 324 : cluster [DBG] Standby manager daemon vm04.ztqrcx started
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.148335+0000 mon.vm01 (mon.0) 325 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm04.ztqrcx/crt"} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.149094+0000 mon.vm01 (mon.0) 326 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.150447+0000 mon.vm01 (mon.0) 327 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm04.ztqrcx/key"} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.151880+0000 mon.vm01 (mon.0) 328 : audit [DBG] from='mgr.? 192.168.123.104:0/3896960874' entity='mgr.vm04.ztqrcx' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.171314+0000 mon.vm01 (mon.0) 329 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.176730+0000 mon.vm01 (mon.0) 330 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.177888+0000 mon.vm01 (mon.0) 331 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm04", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.178579+0000 mon.vm01 (mon.0) 332 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.723604+0000 mon.vm01 (mon.0) 333 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.727986+0000 mon.vm01 (mon.0) 334 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.728847+0000 mon.vm01 (mon.0) 335 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.729370+0000 mon.vm01 (mon.0) 336 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-16T19:22:01.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:00 vm01 bash[28222]: audit 2026-04-16T19:22:00.729879+0000 mon.vm01 (mon.0) 337 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:01.614 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:01.705 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-04-16T19:22:01.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:00.177642+0000 mgr.vm01.nwhpas (mgr.14227) 45 : cephadm [INF] Reconfiguring ceph-exporter.vm04 (monmap changed)...
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:00.179309+0000 mgr.vm01.nwhpas (mgr.14227) 46 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm04 on vm04
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:00.728647+0000 mgr.vm01.nwhpas (mgr.14227) 47 : cephadm [INF] Reconfiguring mon.vm04 (monmap changed)...
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:00.730435+0000 mgr.vm01.nwhpas (mgr.14227) 48 : cephadm [INF] Reconfiguring daemon mon.vm04 on vm04
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cluster 2026-04-16T19:22:01.078025+0000 mon.vm01 (mon.0) 338 : cluster [DBG] mgrmap e18: vm01.nwhpas(active, since 23s), standbys: vm04.ztqrcx
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.078181+0000 mon.vm01 (mon.0) 339 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm04.ztqrcx", "id": "vm04.ztqrcx"} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.162397+0000 mon.vm01 (mon.0) 340 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.168593+0000 mon.vm01 (mon.0) 341 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:01.169442+0000 mgr.vm01.nwhpas (mgr.14227) 49 : cephadm [INF] Reconfiguring mgr.vm04.ztqrcx (monmap changed)...
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.169692+0000 mon.vm01 (mon.0) 342 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm04.ztqrcx", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.170406+0000 mon.vm01 (mon.0) 343 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr services"} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.171015+0000 mon.vm01 (mon.0) 344 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:01.172062+0000 mgr.vm01.nwhpas (mgr.14227) 50 : cephadm [INF] Reconfiguring daemon mgr.vm04.ztqrcx on vm04
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cluster 2026-04-16T19:22:01.258592+0000 mgr.vm01.nwhpas (mgr.14227) 51 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.613053+0000 mon.vm01 (mon.0) 345 : audit [DBG] from='client.? 192.168.123.101:0/3521482126' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.636100+0000 mon.vm01 (mon.0) 346 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.641541+0000 mon.vm01 (mon.0) 347 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:01.642373+0000 mgr.vm01.nwhpas (mgr.14227) 52 : cephadm [INF] Reconfiguring crash.vm04 (monmap changed)...
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.642644+0000 mon.vm01 (mon.0) 348 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: audit 2026-04-16T19:22:01.643307+0000 mon.vm01 (mon.0) 349 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:01.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:01 vm01 bash[28222]: cephadm 2026-04-16T19:22:01.643855+0000 mgr.vm01.nwhpas (mgr.14227) 53 : cephadm [INF] Reconfiguring daemon crash.vm04 on vm04
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:00.177642+0000 mgr.vm01.nwhpas (mgr.14227) 45 : cephadm [INF] Reconfiguring ceph-exporter.vm04 (monmap changed)...
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:00.179309+0000 mgr.vm01.nwhpas (mgr.14227) 46 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm04 on vm04
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:00.728647+0000 mgr.vm01.nwhpas (mgr.14227) 47 : cephadm [INF] Reconfiguring mon.vm04 (monmap changed)...
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:00.730435+0000 mgr.vm01.nwhpas (mgr.14227) 48 : cephadm [INF] Reconfiguring daemon mon.vm04 on vm04
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cluster 2026-04-16T19:22:01.078025+0000 mon.vm01 (mon.0) 338 : cluster [DBG] mgrmap e18: vm01.nwhpas(active, since 23s), standbys: vm04.ztqrcx
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.078181+0000 mon.vm01 (mon.0) 339 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr metadata", "who": "vm04.ztqrcx", "id": "vm04.ztqrcx"} : dispatch
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.162397+0000 mon.vm01 (mon.0) 340 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.168593+0000 mon.vm01 (mon.0) 341 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:01.169442+0000 mgr.vm01.nwhpas (mgr.14227) 49 : cephadm [INF] Reconfiguring mgr.vm04.ztqrcx (monmap changed)...
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.169692+0000 mon.vm01 (mon.0) 342 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm04.ztqrcx", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.170406+0000 mon.vm01 (mon.0) 343 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mgr services"} : dispatch
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.171015+0000 mon.vm01 (mon.0) 344 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:01.172062+0000 mgr.vm01.nwhpas (mgr.14227) 50 : cephadm [INF] Reconfiguring daemon mgr.vm04.ztqrcx on vm04
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cluster 2026-04-16T19:22:01.258592+0000 mgr.vm01.nwhpas (mgr.14227) 51 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.613053+0000 mon.vm01 (mon.0) 345 : audit [DBG] from='client.? 192.168.123.101:0/3521482126' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.613053+0000 mon.vm01 (mon.0) 345 : audit [DBG] from='client.? 192.168.123.101:0/3521482126' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.636100+0000 mon.vm01 (mon.0) 346 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.636100+0000 mon.vm01 (mon.0) 346 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.641541+0000 mon.vm01 (mon.0) 347 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.641541+0000 mon.vm01 (mon.0) 347 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:01.642373+0000 mgr.vm01.nwhpas (mgr.14227) 52 : cephadm [INF] Reconfiguring crash.vm04 (monmap changed)... 2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:01.642373+0000 mgr.vm01.nwhpas (mgr.14227) 52 : cephadm [INF] Reconfiguring crash.vm04 (monmap changed)... 
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.642644+0000 mon.vm01 (mon.0) 348 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm04", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-16T19:22:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: audit 2026-04-16T19:22:01.643307+0000 mon.vm01 (mon.0) 349 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:02.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:01 vm04 bash[34817]: cephadm 2026-04-16T19:22:01.643855+0000 mgr.vm01.nwhpas (mgr.14227) 53 : cephadm [INF] Reconfiguring daemon crash.vm04 on vm04
2026-04-16T19:22:02.706 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:03.035 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:03.400 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.082396+0000 mon.vm01 (mon.0) 350 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.088232+0000 mon.vm01 (mon.0) 351 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.094174+0000 mon.vm01 (mon.0) 352 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.100709+0000 mon.vm01 (mon.0) 353 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.103532+0000 mon.vm01 (mon.0) 354 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.103928+0000 mgr.vm01.nwhpas (mgr.14227) 54 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.104768+0000 mon.vm01 (mon.0) 355 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"} : dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.105086+0000 mgr.vm01.nwhpas (mgr.14227) 55 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.109206+0000 mon.vm01 (mon.0) 356 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.121662+0000 mon.vm01 (mon.0) 357 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.127405+0000 mon.vm01 (mon.0) 358 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.130238+0000 mon.vm01 (mon.0) 359 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.130571+0000 mgr.vm01.nwhpas (mgr.14227) 56 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.131318+0000 mon.vm01 (mon.0) 360 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"} : dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.131525+0000 mgr.vm01.nwhpas (mgr.14227) 57 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"}]: dispatch
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.135692+0000 mon.vm01 (mon.0) 361 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.148915+0000 mon.vm01 (mon.0) 362 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.401 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.154490+0000 mon.vm01 (mon.0) 363 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.157273+0000 mon.vm01 (mon.0) 364 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.157658+0000 mgr.vm01.nwhpas (mgr.14227) 58 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.158513+0000 mon.vm01 (mon.0) 365 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"} : dispatch
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.158751+0000 mgr.vm01.nwhpas (mgr.14227) 59 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"}]: dispatch
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.163019+0000 mon.vm01 (mon.0) 366 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: cephadm 2026-04-16T19:22:02.372808+0000 mgr.vm01.nwhpas (mgr.14227) 60 : cephadm [INF] Certificate for "grafana_cert (vm01)" is still valid for 1094 days.
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.373385+0000 mon.vm01 (mon.0) 367 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.568545+0000 mon.vm01 (mon.0) 368 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:22:03.402 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:03 vm01 bash[28222]: audit 2026-04-16T19:22:02.569129+0000 mgr.vm01.nwhpas (mgr.14227) 61 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.082396+0000 mon.vm01 (mon.0) 350 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.088232+0000 mon.vm01 (mon.0) 351 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.094174+0000 mon.vm01 (mon.0) 352 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.100709+0000 mon.vm01 (mon.0) 353 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.103532+0000 mon.vm01 (mon.0) 354 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.103928+0000 mgr.vm01.nwhpas (mgr.14227) 54 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.104768+0000 mon.vm01 (mon.0) 355 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"} : dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.105086+0000 mgr.vm01.nwhpas (mgr.14227) 55 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm01.local:9093"}]: dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.109206+0000 mon.vm01 (mon.0) 356 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.121662+0000 mon.vm01 (mon.0) 357 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.127405+0000 mon.vm01 (mon.0) 358 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.130238+0000 mon.vm01 (mon.0) 359 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.130571+0000 mgr.vm01.nwhpas (mgr.14227) 56 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.131318+0000 mon.vm01 (mon.0) 360 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"} : dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.131525+0000 mgr.vm01.nwhpas (mgr.14227) 57 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm01.local:3000"}]: dispatch
2026-04-16T19:22:03.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.135692+0000 mon.vm01 (mon.0) 361 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.148915+0000 mon.vm01 (mon.0) 362 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.154490+0000 mon.vm01 (mon.0) 363 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.157273+0000 mon.vm01 (mon.0) 364 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.157658+0000 mgr.vm01.nwhpas (mgr.14227) 58 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.158513+0000 mon.vm01 (mon.0) 365 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"} : dispatch
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.158751+0000 mgr.vm01.nwhpas (mgr.14227) 59 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm01.local:9095"}]: dispatch
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.163019+0000 mon.vm01 (mon.0) 366 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: cephadm 2026-04-16T19:22:02.372808+0000 mgr.vm01.nwhpas (mgr.14227) 60 : cephadm [INF] Certificate for "grafana_cert (vm01)" is still valid for 1094 days.
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.373385+0000 mon.vm01 (mon.0) 367 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.568545+0000 mon.vm01 (mon.0) 368 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:22:03.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:03 vm04 bash[34817]: audit 2026-04-16T19:22:02.569129+0000 mgr.vm01.nwhpas (mgr.14227) 61 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-16T19:22:03.538 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:03.619 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-04-16T19:22:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:04 vm04 bash[34817]: audit 2026-04-16T19:22:03.183082+0000 mon.vm01 (mon.0) 369 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:22:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:04 vm04 bash[34817]: audit 2026-04-16T19:22:03.184003+0000 mgr.vm01.nwhpas (mgr.14227) 62 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-16T19:22:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:04 vm04 bash[34817]: cluster 2026-04-16T19:22:03.258828+0000 mgr.vm01.nwhpas (mgr.14227) 63 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:04 vm04 bash[34817]: audit 2026-04-16T19:22:03.297614+0000 mon.vm01 (mon.0) 370 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:22:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:04 vm04 bash[34817]: audit 2026-04-16T19:22:03.298327+0000 mgr.vm01.nwhpas (mgr.14227) 64 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-16T19:22:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:04 vm04 bash[34817]: audit 2026-04-16T19:22:03.537367+0000 mon.vm01 (mon.0) 371 : audit [DBG] from='client.? 192.168.123.101:0/3383314918' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:04 vm01 bash[28222]: audit 2026-04-16T19:22:03.183082+0000 mon.vm01 (mon.0) 369 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:22:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:04 vm01 bash[28222]: audit 2026-04-16T19:22:03.184003+0000 mgr.vm01.nwhpas (mgr.14227) 62 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-16T19:22:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:04 vm01 bash[28222]: cluster 2026-04-16T19:22:03.258828+0000 mgr.vm01.nwhpas (mgr.14227) 63 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:04 vm01 bash[28222]: audit 2026-04-16T19:22:03.297614+0000 mon.vm01 (mon.0) 370 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-16T19:22:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:04 vm01 bash[28222]: audit 2026-04-16T19:22:03.298327+0000 mgr.vm01.nwhpas (mgr.14227) 64 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-16T19:22:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:04 vm01 bash[28222]: audit 2026-04-16T19:22:03.537367+0000 mon.vm01 (mon.0) 371 : audit [DBG] from='client.? 192.168.123.101:0/3383314918' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:04.620 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:04.919 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:05.331 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:05.399 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-04-16T19:22:06.399 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:06.590 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:06 vm01 bash[28222]: cluster 2026-04-16T19:22:05.259025+0000 mgr.vm01.nwhpas (mgr.14227) 65 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:06.590 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:06 vm01 bash[28222]: audit 2026-04-16T19:22:05.330717+0000 mon.vm01 (mon.0) 372 : audit [DBG] from='client.? 192.168.123.101:0/3133352125' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:06.696 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:06.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:06 vm04 bash[34817]: cluster 2026-04-16T19:22:05.259025+0000 mgr.vm01.nwhpas (mgr.14227) 65 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:06.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:06 vm04 bash[34817]: audit 2026-04-16T19:22:05.330717+0000 mon.vm01 (mon.0) 372 : audit [DBG] from='client.? 192.168.123.101:0/3133352125' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
192.168.123.101:0/3133352125' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:07.107 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:07.337 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-16T19:22:08.338 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.012819+0000 mon.vm01 (mon.0) 373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.012819+0000 mon.vm01 (mon.0) 373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.018657+0000 mon.vm01 (mon.0) 374 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.018657+0000 mon.vm01 (mon.0) 374 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.025570+0000 mon.vm01 (mon.0) 375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.025570+0000 mon.vm01 (mon.0) 375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.031440+0000 mon.vm01 (mon.0) 376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.031440+0000 mon.vm01 (mon.0) 376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.106886+0000 mon.vm01 (mon.0) 377 : audit [DBG] from='client.? 192.168.123.101:0/2168223915' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.106886+0000 mon.vm01 (mon.0) 377 : audit [DBG] from='client.? 
192.168.123.101:0/2168223915' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: cluster 2026-04-16T19:22:07.259182+0000 mgr.vm01.nwhpas (mgr.14227) 66 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: cluster 2026-04-16T19:22:07.259182+0000 mgr.vm01.nwhpas (mgr.14227) 66 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.558115+0000 mon.vm01 (mon.0) 378 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:08.348 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:08 vm01 bash[28222]: audit 2026-04-16T19:22:07.558115+0000 mon.vm01 (mon.0) 378 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.012819+0000 mon.vm01 (mon.0) 373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.012819+0000 mon.vm01 (mon.0) 373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.018657+0000 mon.vm01 (mon.0) 374 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.018657+0000 mon.vm01 (mon.0) 374 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.025570+0000 mon.vm01 (mon.0) 375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.025570+0000 mon.vm01 (mon.0) 375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.031440+0000 mon.vm01 (mon.0) 376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.031440+0000 mon.vm01 (mon.0) 376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.106886+0000 mon.vm01 (mon.0) 377 : audit [DBG] from='client.? 
2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: cluster 2026-04-16T19:22:07.259182+0000 mgr.vm01.nwhpas (mgr.14227) 66 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:08.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:08 vm04 bash[34817]: audit 2026-04-16T19:22:07.558115+0000 mon.vm01 (mon.0) 378 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:22:08.682 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:09.139 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:09.255 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.024574+0000 mon.vm01 (mon.0) 379 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.029407+0000 mon.vm01 (mon.0) 380 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.035276+0000 mon.vm01 (mon.0) 381 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.040155+0000 mon.vm01 (mon.0) 382 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.040918+0000 mon.vm01 (mon.0) 383 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.041343+0000 mon.vm01 (mon.0) 384 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:22:09.355 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.044686+0000 mon.vm01 (mon.0) 385 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.356 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.046047+0000 mon.vm01 (mon.0) 386 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:22:09.356 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.048525+0000 mon.vm01 (mon.0) 387 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch
2026-04-16T19:22:09.356 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.049013+0000 mon.vm01 (mon.0) 388 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:09.356 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.050818+0000 mon.vm01 (mon.0) 389 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch
2026-04-16T19:22:09.356 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:09 vm01 bash[28222]: audit 2026-04-16T19:22:08.051224+0000 mon.vm01 (mon.0) 390 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.024574+0000 mon.vm01 (mon.0) 379 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.029407+0000 mon.vm01 (mon.0) 380 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.035276+0000 mon.vm01 (mon.0) 381 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.040155+0000 mon.vm01 (mon.0) 382 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.040918+0000 mon.vm01 (mon.0) 383 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.041343+0000 mon.vm01 (mon.0) 384 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.044686+0000 mon.vm01 (mon.0) 385 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.046047+0000 mon.vm01 (mon.0) 386 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:22:09.366 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.048525+0000 mon.vm01 (mon.0) 387 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch
2026-04-16T19:22:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.049013+0000 mon.vm01 (mon.0) 388 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.050818+0000 mon.vm01 (mon.0) 389 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch
2026-04-16T19:22:09.367 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:09 vm04 bash[34817]: audit 2026-04-16T19:22:08.051224+0000 mon.vm01 (mon.0) 390 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:10.256 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: audit 2026-04-16T19:22:09.138725+0000 mon.vm01 (mon.0) 391 : audit [DBG] from='client.? 192.168.123.101:0/2857935274' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: cluster 2026-04-16T19:22:09.259410+0000 mgr.vm01.nwhpas (mgr.14227) 67 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: audit 2026-04-16T19:22:10.015544+0000 mon.vm04 (mon.1) 2 : audit [INF] from='client.? 192.168.123.104:0/725424356' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f4401b97-80b9-400c-9e95-0cc516e41cfc"} : dispatch
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: audit 2026-04-16T19:22:10.016571+0000 mon.vm01 (mon.0) 392 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f4401b97-80b9-400c-9e95-0cc516e41cfc"} : dispatch
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: audit 2026-04-16T19:22:10.021114+0000 mon.vm01 (mon.0) 393 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f4401b97-80b9-400c-9e95-0cc516e41cfc"}]': finished
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: cluster 2026-04-16T19:22:10.023856+0000 mon.vm01 (mon.0) 394 : cluster [DBG] osdmap e6: 1 total, 0 up, 1 in
2026-04-16T19:22:10.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:10 vm04 bash[34817]: audit 2026-04-16T19:22:10.024231+0000 mon.vm01 (mon.0) 395 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: audit 2026-04-16T19:22:09.138725+0000 mon.vm01 (mon.0) 391 : audit [DBG] from='client.? 192.168.123.101:0/2857935274' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: cluster 2026-04-16T19:22:09.259410+0000 mgr.vm01.nwhpas (mgr.14227) 67 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: audit 2026-04-16T19:22:10.015544+0000 mon.vm04 (mon.1) 2 : audit [INF] from='client.? 192.168.123.104:0/725424356' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f4401b97-80b9-400c-9e95-0cc516e41cfc"} : dispatch
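Records 392 through 395 (seen here through vm04's journal and repeated below through vm01's) are the first OSD being allocated: a client authenticated as client.bootstrap-osd asks the mon to bind a new OSD id to the freshly generated OSD uuid, the command finishes, the osdmap moves to e6 ("1 total, 0 up, 1 in"), and the mgr immediately queries the new OSD's metadata. Done by hand, the allocation step amounts to the following sketch; the keyring path is the conventional location, not one quoted from this log:

    osd_uuid=$(uuidgen)
    # prints the OSD id the mon allocated (0 for the first OSD in this run)
    ceph -n client.bootstrap-osd \
         -k /var/lib/ceph/bootstrap-osd/ceph.keyring \
         osd new "$osd_uuid"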
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: audit 2026-04-16T19:22:10.016571+0000 mon.vm01 (mon.0) 392 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f4401b97-80b9-400c-9e95-0cc516e41cfc"} : dispatch
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: audit 2026-04-16T19:22:10.021114+0000 mon.vm01 (mon.0) 393 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f4401b97-80b9-400c-9e95-0cc516e41cfc"}]': finished
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: cluster 2026-04-16T19:22:10.023856+0000 mon.vm01 (mon.0) 394 : cluster [DBG] osdmap e6: 1 total, 0 up, 1 in
2026-04-16T19:22:10.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:10 vm01 bash[28222]: audit 2026-04-16T19:22:10.024231+0000 mon.vm01 (mon.0) 395 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:10.551 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:11.080 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:11.184 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1776367330,"num_remapped_pgs":0}
2026-04-16T19:22:11.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: audit 2026-04-16T19:22:10.081706+0000 mon.vm01 (mon.0) 396 : audit [INF] from='client.? 192.168.123.101:0/1487638471' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "41dc1c75-f170-45be-bb8a-72218138afbc"} : dispatch
2026-04-16T19:22:11.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: audit 2026-04-16T19:22:10.084769+0000 mon.vm01 (mon.0) 397 : audit [INF] from='client.? 192.168.123.101:0/1487638471' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "41dc1c75-f170-45be-bb8a-72218138afbc"}]': finished
2026-04-16T19:22:11.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: cluster 2026-04-16T19:22:10.087133+0000 mon.vm01 (mon.0) 398 : cluster [DBG] osdmap e7: 2 total, 0 up, 2 in
2026-04-16T19:22:11.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: audit 2026-04-16T19:22:10.087249+0000 mon.vm01 (mon.0) 399 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:11.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: audit 2026-04-16T19:22:10.087359+0000 mon.vm01 (mon.0) 400 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:11.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: audit 2026-04-16T19:22:10.818460+0000 mon.vm04 (mon.1) 3 : audit [DBG] from='client.? 192.168.123.104:0/3575353002' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-16T19:22:11.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:11 vm04 bash[34817]: audit 2026-04-16T19:22:10.844329+0000 mon.vm01 (mon.0) 401 : audit [DBG] from='client.? 192.168.123.101:0/2643803779' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.081706+0000 mon.vm01 (mon.0) 396 : audit [INF] from='client.? 192.168.123.101:0/1487638471' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "41dc1c75-f170-45be-bb8a-72218138afbc"} : dispatch
2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.084769+0000 mon.vm01 (mon.0) 397 : audit [INF] from='client.? 192.168.123.101:0/1487638471' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "41dc1c75-f170-45be-bb8a-72218138afbc"}]': finished
2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: cluster 2026-04-16T19:22:10.087133+0000 mon.vm01 (mon.0) 398 : cluster [DBG] osdmap e7: 2 total, 0 up, 2 in
2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.087249+0000 mon.vm01 (mon.0) 399 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.087359+0000 mon.vm01 (mon.0) 400 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.818460+0000 mon.vm04 (mon.1) 3 : audit [DBG] from='client.? 192.168.123.104:0/3575353002' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
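The mon getmap dispatches above ((mon.1) 3 and (mon.0) 401, one per host) fit the next stage of OSD preparation: after reserving an id, the provisioning tool fetches the current monmap so the new OSD's store can be created against it. As a standalone sketch, with an illustrative output path:

    # binary monmap, as consumed by ceph-osd --mkfs --monmap <file>
    ceph mon getmap -o /tmp/activate.monmap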
192.168.123.104:0/3575353002' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.818460+0000 mon.vm04 (mon.1) 3 : audit [DBG] from='client.? 192.168.123.104:0/3575353002' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.844329+0000 mon.vm01 (mon.0) 401 : audit [DBG] from='client.? 192.168.123.101:0/2643803779' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:11.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:11 vm01 bash[28222]: audit 2026-04-16T19:22:10.844329+0000 mon.vm01 (mon.0) 401 : audit [DBG] from='client.? 192.168.123.101:0/2643803779' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:12.185 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:12.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.080132+0000 mon.vm01 (mon.0) 402 : audit [DBG] from='client.? 192.168.123.101:0/712351144' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:12.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.080132+0000 mon.vm01 (mon.0) 402 : audit [DBG] from='client.? 192.168.123.101:0/712351144' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:12.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: cluster 2026-04-16T19:22:11.259622+0000 mgr.vm01.nwhpas (mgr.14227) 68 : cluster [DBG] pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:12.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: cluster 2026-04-16T19:22:11.259622+0000 mgr.vm01.nwhpas (mgr.14227) 68 : cluster [DBG] pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.911288+0000 mon.vm04 (mon.1) 4 : audit [INF] from='client.? 192.168.123.104:0/4245214485' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.911288+0000 mon.vm04 (mon.1) 4 : audit [INF] from='client.? 192.168.123.104:0/4245214485' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.912410+0000 mon.vm01 (mon.0) 403 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.912410+0000 mon.vm01 (mon.0) 403 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.916590+0000 mon.vm01 (mon.0) 404 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"}]': finished 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.916590+0000 mon.vm01 (mon.0) 404 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"}]': finished 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: cluster 2026-04-16T19:22:11.920761+0000 mon.vm01 (mon.0) 405 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: cluster 2026-04-16T19:22:11.920761+0000 mon.vm01 (mon.0) 405 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.921022+0000 mon.vm01 (mon.0) 406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.921022+0000 mon.vm01 (mon.0) 406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.921129+0000 mon.vm01 (mon.0) 407 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.921129+0000 mon.vm01 (mon.0) 407 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.921196+0000 mon.vm01 (mon.0) 408 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.921196+0000 mon.vm01 (mon.0) 408 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.989843+0000 mon.vm01 (mon.0) 409 : audit [INF] from='client.? 192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.989843+0000 mon.vm01 (mon.0) 409 : audit [INF] from='client.? 
192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.994956+0000 mon.vm01 (mon.0) 410 : audit [INF] from='client.? 192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"}]': finished 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.994956+0000 mon.vm01 (mon.0) 410 : audit [INF] from='client.? 192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"}]': finished 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: cluster 2026-04-16T19:22:11.998700+0000 mon.vm01 (mon.0) 411 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: cluster 2026-04-16T19:22:11.998700+0000 mon.vm01 (mon.0) 411 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.998968+0000 mon.vm01 (mon.0) 412 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.998968+0000 mon.vm01 (mon.0) 412 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.999155+0000 mon.vm01 (mon.0) 413 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.999155+0000 mon.vm01 (mon.0) 413 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.999265+0000 mon.vm01 (mon.0) 414 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.999265+0000 mon.vm01 (mon.0) 414 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.999320+0000 mon.vm01 (mon.0) 415 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:12.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:12 vm04 bash[34817]: audit 2026-04-16T19:22:11.999320+0000 mon.vm01 (mon.0) 415 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.080132+0000 mon.vm01 (mon.0) 402 : audit [DBG] from='client.? 192.168.123.101:0/712351144' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.080132+0000 mon.vm01 (mon.0) 402 : audit [DBG] from='client.? 192.168.123.101:0/712351144' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: cluster 2026-04-16T19:22:11.259622+0000 mgr.vm01.nwhpas (mgr.14227) 68 : cluster [DBG] pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: cluster 2026-04-16T19:22:11.259622+0000 mgr.vm01.nwhpas (mgr.14227) 68 : cluster [DBG] pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.911288+0000 mon.vm04 (mon.1) 4 : audit [INF] from='client.? 192.168.123.104:0/4245214485' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.911288+0000 mon.vm04 (mon.1) 4 : audit [INF] from='client.? 192.168.123.104:0/4245214485' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.912410+0000 mon.vm01 (mon.0) 403 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.912410+0000 mon.vm01 (mon.0) 403 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.916590+0000 mon.vm01 (mon.0) 404 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"}]': finished 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.916590+0000 mon.vm01 (mon.0) 404 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31f3cbba-e205-40f7-b992-d9d70a84e201"}]': finished 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: cluster 2026-04-16T19:22:11.920761+0000 mon.vm01 (mon.0) 405 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: cluster 2026-04-16T19:22:11.920761+0000 mon.vm01 (mon.0) 405 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.921022+0000 mon.vm01 (mon.0) 406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.921022+0000 mon.vm01 (mon.0) 406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.921129+0000 mon.vm01 (mon.0) 407 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.921129+0000 mon.vm01 (mon.0) 407 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.921196+0000 mon.vm01 (mon.0) 408 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.921196+0000 mon.vm01 (mon.0) 408 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.989843+0000 mon.vm01 (mon.0) 409 : audit [INF] from='client.? 192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.989843+0000 mon.vm01 (mon.0) 409 : audit [INF] from='client.? 192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.994956+0000 mon.vm01 (mon.0) 410 : audit [INF] from='client.? 192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"}]': finished 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.994956+0000 mon.vm01 (mon.0) 410 : audit [INF] from='client.? 
192.168.123.101:0/2809745158' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f8707e07-f8bc-46db-ad2f-0aa8f0a897b7"}]': finished 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: cluster 2026-04-16T19:22:11.998700+0000 mon.vm01 (mon.0) 411 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: cluster 2026-04-16T19:22:11.998700+0000 mon.vm01 (mon.0) 411 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.998968+0000 mon.vm01 (mon.0) 412 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.998968+0000 mon.vm01 (mon.0) 412 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.999155+0000 mon.vm01 (mon.0) 413 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.999155+0000 mon.vm01 (mon.0) 413 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.999265+0000 mon.vm01 (mon.0) 414 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.999265+0000 mon.vm01 (mon.0) 414 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:12.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.999320+0000 mon.vm01 (mon.0) 415 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:12.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:12 vm01 bash[28222]: audit 2026-04-16T19:22:11.999320+0000 mon.vm01 (mon.0) 415 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:12.483 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:12.908 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:13.010 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1776367331,"num_remapped_pgs":0} 2026-04-16T19:22:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:13 vm01 bash[28222]: audit 2026-04-16T19:22:12.836640+0000 mon.vm04 (mon.1) 5 : audit [DBG] from='client.? 
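At this point the osd stat JSON above shows num_osds 4 with num_up_osds still 0, matching osdmap e9's "4 total, 0 up, 4 in": the OSDs exist in the map and are marked in, but no daemon has booted yet, which is why the polling continues. The osd_in_since field is plain Unix time and lines up with the e9 commit:

    date -u -d @1776367331
    # Thu Apr 16 19:22:11 UTC 2026, the moment the map marked these OSDs in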
2026-04-16T19:22:13.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:13 vm01 bash[28222]: audit 2026-04-16T19:22:12.908024+0000 mon.vm01 (mon.0) 416 : audit [DBG] from='client.? 192.168.123.101:0/4223752273' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:13.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:13 vm01 bash[28222]: audit 2026-04-16T19:22:12.909511+0000 mon.vm01 (mon.0) 417 : audit [DBG] from='client.? 192.168.123.101:0/1417114819' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-16T19:22:13.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:13 vm04 bash[34817]: audit 2026-04-16T19:22:12.836640+0000 mon.vm04 (mon.1) 5 : audit [DBG] from='client.? 192.168.123.104:0/1729015962' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-16T19:22:13.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:13 vm04 bash[34817]: audit 2026-04-16T19:22:12.908024+0000 mon.vm01 (mon.0) 416 : audit [DBG] from='client.? 192.168.123.101:0/4223752273' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:13.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:13 vm04 bash[34817]: audit 2026-04-16T19:22:12.909511+0000 mon.vm01 (mon.0) 417 : audit [DBG] from='client.? 192.168.123.101:0/1417114819' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-16T19:22:14.012 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: cluster 2026-04-16T19:22:13.259801+0000 mgr.vm01.nwhpas (mgr.14227) 69 : cluster [DBG] pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.980343+0000 mon.vm04 (mon.1) 6 : audit [INF] from='client.? 192.168.123.104:0/4012482755' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0"} : dispatch
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.981350+0000 mon.vm01 (mon.0) 418 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0"} : dispatch
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.990871+0000 mon.vm01 (mon.0) 419 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0"}]': finished
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: cluster 2026-04-16T19:22:13.994145+0000 mon.vm01 (mon.0) 420 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.994311+0000 mon.vm01 (mon.0) 421 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.994394+0000 mon.vm01 (mon.0) 422 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.994580+0000 mon.vm01 (mon.0) 423 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.994716+0000 mon.vm01 (mon.0) 424 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-16T19:22:14.308 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:14 vm01 bash[28222]: audit 2026-04-16T19:22:13.994861+0000 mon.vm01 (mon.0) 425 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:14.316 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:14.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: cluster 2026-04-16T19:22:13.259801+0000 mgr.vm01.nwhpas (mgr.14227) 69 : cluster [DBG] pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.980343+0000 mon.vm04 (mon.1) 6 : audit [INF] from='client.? 192.168.123.104:0/4012482755' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0"} : dispatch
2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.981350+0000 mon.vm01 (mon.0) 418 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0"} : dispatch
2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.990871+0000 mon.vm01 (mon.0) 419 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0"}]': finished 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: cluster 2026-04-16T19:22:13.994145+0000 mon.vm01 (mon.0) 420 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: cluster 2026-04-16T19:22:13.994145+0000 mon.vm01 (mon.0) 420 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994311+0000 mon.vm01 (mon.0) 421 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994311+0000 mon.vm01 (mon.0) 421 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994394+0000 mon.vm01 (mon.0) 422 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994394+0000 mon.vm01 (mon.0) 422 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994580+0000 mon.vm01 (mon.0) 423 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994580+0000 mon.vm01 (mon.0) 423 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994716+0000 mon.vm01 (mon.0) 424 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994716+0000 mon.vm01 (mon.0) 424 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994861+0000 mon.vm01 (mon.0) 425 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:14.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:14 vm04 bash[34817]: audit 2026-04-16T19:22:13.994861+0000 mon.vm01 (mon.0) 425 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:14.792 INFO:teuthology.orchestra.run.vm01.stdout: 
2026-04-16T19:22:14.909 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1776367334,"num_remapped_pgs":0} 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.194460+0000 mon.vm01 (mon.0) 426 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.194460+0000 mon.vm01 (mon.0) 426 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.197680+0000 mon.vm01 (mon.0) 427 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"}]': finished 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.197680+0000 mon.vm01 (mon.0) 427 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"}]': finished 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: cluster 2026-04-16T19:22:14.201087+0000 mon.vm01 (mon.0) 428 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: cluster 2026-04-16T19:22:14.201087+0000 mon.vm01 (mon.0) 428 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.201388+0000 mon.vm01 (mon.0) 429 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.201388+0000 mon.vm01 (mon.0) 429 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.201615+0000 mon.vm01 (mon.0) 430 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.201615+0000 mon.vm01 (mon.0) 430 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.201861+0000 mon.vm01 (mon.0) 431 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 
2026-04-16T19:22:14.201861+0000 mon.vm01 (mon.0) 431 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.202064+0000 mon.vm01 (mon.0) 432 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.202064+0000 mon.vm01 (mon.0) 432 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.202306+0000 mon.vm01 (mon.0) 433 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.202306+0000 mon.vm01 (mon.0) 433 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.202626+0000 mon.vm01 (mon.0) 434 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.202626+0000 mon.vm01 (mon.0) 434 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.782159+0000 mon.vm04 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.104:0/2612495634' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.782159+0000 mon.vm04 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.104:0/2612495634' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.791657+0000 mon.vm01 (mon.0) 435 : audit [DBG] from='client.? 192.168.123.101:0/746863290' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:15.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:15 vm01 bash[28222]: audit 2026-04-16T19:22:14.791657+0000 mon.vm01 (mon.0) 435 : audit [DBG] from='client.? 192.168.123.101:0/746863290' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.194460+0000 mon.vm01 (mon.0) 426 : audit [INF] from='client.? 
192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.194460+0000 mon.vm01 (mon.0) 426 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.197680+0000 mon.vm01 (mon.0) 427 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"}]': finished 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.197680+0000 mon.vm01 (mon.0) 427 : audit [INF] from='client.? 192.168.123.101:0/1163553253' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "79479d78-c6c8-4447-be64-0e15c9cad5ce"}]': finished 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: cluster 2026-04-16T19:22:14.201087+0000 mon.vm01 (mon.0) 428 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: cluster 2026-04-16T19:22:14.201087+0000 mon.vm01 (mon.0) 428 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.201388+0000 mon.vm01 (mon.0) 429 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.201388+0000 mon.vm01 (mon.0) 429 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.201615+0000 mon.vm01 (mon.0) 430 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.201615+0000 mon.vm01 (mon.0) 430 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.201861+0000 mon.vm01 (mon.0) 431 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.201861+0000 mon.vm01 (mon.0) 431 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.202064+0000 mon.vm01 (mon.0) 432 : audit [DBG] from='mgr.14227 
192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.202064+0000 mon.vm01 (mon.0) 432 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.202306+0000 mon.vm01 (mon.0) 433 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.202306+0000 mon.vm01 (mon.0) 433 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.202626+0000 mon.vm01 (mon.0) 434 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.202626+0000 mon.vm01 (mon.0) 434 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.782159+0000 mon.vm04 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.104:0/2612495634' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.782159+0000 mon.vm04 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.104:0/2612495634' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.791657+0000 mon.vm01 (mon.0) 435 : audit [DBG] from='client.? 192.168.123.101:0/746863290' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:15.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:15 vm04 bash[34817]: audit 2026-04-16T19:22:14.791657+0000 mon.vm01 (mon.0) 435 : audit [DBG] from='client.? 192.168.123.101:0/746863290' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:15.910 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.094818+0000 mon.vm01 (mon.0) 436 : audit [DBG] from='client.? 192.168.123.101:0/849078174' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.094818+0000 mon.vm01 (mon.0) 436 : audit [DBG] from='client.? 
192.168.123.101:0/849078174' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: cluster 2026-04-16T19:22:15.259984+0000 mgr.vm01.nwhpas (mgr.14227) 70 : cluster [DBG] pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: cluster 2026-04-16T19:22:15.259984+0000 mgr.vm01.nwhpas (mgr.14227) 70 : cluster [DBG] pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.887327+0000 mon.vm04 (mon.1) 8 : audit [INF] from='client.? 192.168.123.104:0/816776130' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.887327+0000 mon.vm04 (mon.1) 8 : audit [INF] from='client.? 192.168.123.104:0/816776130' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.888201+0000 mon.vm01 (mon.0) 437 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.888201+0000 mon.vm01 (mon.0) 437 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.931316+0000 mon.vm01 (mon.0) 438 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"}]': finished 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.931316+0000 mon.vm01 (mon.0) 438 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"}]': finished 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: cluster 2026-04-16T19:22:15.934454+0000 mon.vm01 (mon.0) 439 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: cluster 2026-04-16T19:22:15.934454+0000 mon.vm01 (mon.0) 439 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.934800+0000 mon.vm01 (mon.0) 440 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.934800+0000 mon.vm01 (mon.0) 440 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.934911+0000 mon.vm01 (mon.0) 441 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.934911+0000 mon.vm01 (mon.0) 441 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.934988+0000 mon.vm01 (mon.0) 442 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.934988+0000 mon.vm01 (mon.0) 442 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935060+0000 mon.vm01 (mon.0) 443 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935060+0000 mon.vm01 (mon.0) 443 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935148+0000 mon.vm01 (mon.0) 444 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935148+0000 mon.vm01 (mon.0) 444 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:16.209 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935231+0000 mon.vm01 (mon.0) 445 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935231+0000 mon.vm01 (mon.0) 445 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935305+0000 mon.vm01 (mon.0) 446 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:16.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:16 vm01 bash[28222]: audit 2026-04-16T19:22:15.935305+0000 mon.vm01 (mon.0) 446 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:16.227 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:16.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.094818+0000 mon.vm01 (mon.0) 436 : audit [DBG] from='client.? 192.168.123.101:0/849078174' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:16.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.094818+0000 mon.vm01 (mon.0) 436 : audit [DBG] from='client.? 192.168.123.101:0/849078174' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:16.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: cluster 2026-04-16T19:22:15.259984+0000 mgr.vm01.nwhpas (mgr.14227) 70 : cluster [DBG] pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: cluster 2026-04-16T19:22:15.259984+0000 mgr.vm01.nwhpas (mgr.14227) 70 : cluster [DBG] pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.887327+0000 mon.vm04 (mon.1) 8 : audit [INF] from='client.? 192.168.123.104:0/816776130' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.887327+0000 mon.vm04 (mon.1) 8 : audit [INF] from='client.? 192.168.123.104:0/816776130' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.888201+0000 mon.vm01 (mon.0) 437 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.888201+0000 mon.vm01 (mon.0) 437 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.931316+0000 mon.vm01 (mon.0) 438 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"}]': finished 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.931316+0000 mon.vm01 (mon.0) 438 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a60a41cd-e249-46a0-a399-39726704af0a"}]': finished 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: cluster 2026-04-16T19:22:15.934454+0000 mon.vm01 (mon.0) 439 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: cluster 2026-04-16T19:22:15.934454+0000 mon.vm01 (mon.0) 439 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.934800+0000 mon.vm01 (mon.0) 440 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.934800+0000 mon.vm01 (mon.0) 440 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.934911+0000 mon.vm01 (mon.0) 441 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.934911+0000 mon.vm01 (mon.0) 441 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.934988+0000 mon.vm01 (mon.0) 442 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.934988+0000 mon.vm01 (mon.0) 442 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935060+0000 mon.vm01 (mon.0) 443 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935060+0000 mon.vm01 (mon.0) 443 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:16.457 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935148+0000 mon.vm01 (mon.0) 444 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935148+0000 mon.vm01 (mon.0) 444 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935231+0000 mon.vm01 (mon.0) 445 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935231+0000 mon.vm01 (mon.0) 445 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935305+0000 mon.vm01 (mon.0) 446 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:16.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:16 vm04 bash[34817]: audit 2026-04-16T19:22:15.935305+0000 mon.vm01 (mon.0) 446 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:16.698 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:16.785 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0} 2026-04-16T19:22:17.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.223986+0000 mon.vm01 (mon.0) 447 : audit [INF] from='client.? 192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"} : dispatch 2026-04-16T19:22:17.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.223986+0000 mon.vm01 (mon.0) 447 : audit [INF] from='client.? 192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"} : dispatch 2026-04-16T19:22:17.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.227736+0000 mon.vm01 (mon.0) 448 : audit [INF] from='client.? 192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"}]': finished 2026-04-16T19:22:17.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.227736+0000 mon.vm01 (mon.0) 448 : audit [INF] from='client.? 
192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"}]': finished 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: cluster 2026-04-16T19:22:16.232615+0000 mon.vm01 (mon.0) 449 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: cluster 2026-04-16T19:22:16.232615+0000 mon.vm01 (mon.0) 449 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.232975+0000 mon.vm01 (mon.0) 450 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.232975+0000 mon.vm01 (mon.0) 450 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233137+0000 mon.vm01 (mon.0) 451 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233137+0000 mon.vm01 (mon.0) 451 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233225+0000 mon.vm01 (mon.0) 452 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233225+0000 mon.vm01 (mon.0) 452 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233304+0000 mon.vm01 (mon.0) 453 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233304+0000 mon.vm01 (mon.0) 453 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233388+0000 mon.vm01 (mon.0) 454 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233388+0000 mon.vm01 (mon.0) 454 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:17.457 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233440+0000 mon.vm01 (mon.0) 455 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233440+0000 mon.vm01 (mon.0) 455 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233485+0000 mon.vm01 (mon.0) 456 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233485+0000 mon.vm01 (mon.0) 456 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233570+0000 mon.vm01 (mon.0) 457 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.233570+0000 mon.vm01 (mon.0) 457 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.697681+0000 mon.vm01 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.101:0/2823882358' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.697681+0000 mon.vm01 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.101:0/2823882358' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.868187+0000 mon.vm04 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.104:0/2545154029' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:17.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:17 vm04 bash[34817]: audit 2026-04-16T19:22:16.868187+0000 mon.vm04 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.104:0/2545154029' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.223986+0000 mon.vm01 (mon.0) 447 : audit [INF] from='client.? 192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.223986+0000 mon.vm01 (mon.0) 447 : audit [INF] from='client.? 
192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.227736+0000 mon.vm01 (mon.0) 448 : audit [INF] from='client.? 192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"}]': finished 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.227736+0000 mon.vm01 (mon.0) 448 : audit [INF] from='client.? 192.168.123.101:0/134161947' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb9ea244-fcd7-413a-98de-06ed25a24354"}]': finished 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: cluster 2026-04-16T19:22:16.232615+0000 mon.vm01 (mon.0) 449 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: cluster 2026-04-16T19:22:16.232615+0000 mon.vm01 (mon.0) 449 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.232975+0000 mon.vm01 (mon.0) 450 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.232975+0000 mon.vm01 (mon.0) 450 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233137+0000 mon.vm01 (mon.0) 451 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233137+0000 mon.vm01 (mon.0) 451 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233225+0000 mon.vm01 (mon.0) 452 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233225+0000 mon.vm01 (mon.0) 452 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233304+0000 mon.vm01 (mon.0) 453 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233304+0000 mon.vm01 (mon.0) 453 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:17.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233388+0000 mon.vm01 (mon.0) 454 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233388+0000 mon.vm01 (mon.0) 454 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233440+0000 mon.vm01 (mon.0) 455 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233440+0000 mon.vm01 (mon.0) 455 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233485+0000 mon.vm01 (mon.0) 456 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233485+0000 mon.vm01 (mon.0) 456 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233570+0000 mon.vm01 (mon.0) 457 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.233570+0000 mon.vm01 (mon.0) 457 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.697681+0000 mon.vm01 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.101:0/2823882358' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.697681+0000 mon.vm01 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.101:0/2823882358' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.868187+0000 mon.vm04 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.104:0/2545154029' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:17.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:17 vm01 bash[28222]: audit 2026-04-16T19:22:16.868187+0000 mon.vm04 (mon.1) 9 : audit [DBG] from='client.? 
192.168.123.104:0/2545154029' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:17.786 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:18.085 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:18 vm04 bash[34817]: audit 2026-04-16T19:22:17.125482+0000 mon.vm01 (mon.0) 459 : audit [DBG] from='client.? 192.168.123.101:0/3727449557' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:18.085 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:18 vm04 bash[34817]: audit 2026-04-16T19:22:17.125482+0000 mon.vm01 (mon.0) 459 : audit [DBG] from='client.? 192.168.123.101:0/3727449557' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:18.085 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:18 vm04 bash[34817]: cluster 2026-04-16T19:22:17.260200+0000 mgr.vm01.nwhpas (mgr.14227) 71 : cluster [DBG] pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:18.085 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:18 vm04 bash[34817]: cluster 2026-04-16T19:22:17.260200+0000 mgr.vm01.nwhpas (mgr.14227) 71 : cluster [DBG] pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:18.128 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:18.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:18 vm01 bash[28222]: audit 2026-04-16T19:22:17.125482+0000 mon.vm01 (mon.0) 459 : audit [DBG] from='client.? 192.168.123.101:0/3727449557' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:18.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:18 vm01 bash[28222]: audit 2026-04-16T19:22:17.125482+0000 mon.vm01 (mon.0) 459 : audit [DBG] from='client.? 192.168.123.101:0/3727449557' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-16T19:22:18.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:18 vm01 bash[28222]: cluster 2026-04-16T19:22:17.260200+0000 mgr.vm01.nwhpas (mgr.14227) 71 : cluster [DBG] pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:18.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:18 vm01 bash[28222]: cluster 2026-04-16T19:22:17.260200+0000 mgr.vm01.nwhpas (mgr.14227) 71 : cluster [DBG] pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:18.550 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:18.638 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0} 2026-04-16T19:22:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:19 vm04 bash[34817]: audit 2026-04-16T19:22:18.549861+0000 mon.vm01 (mon.0) 460 : audit [DBG] from='client.? 192.168.123.101:0/1871328100' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:19 vm04 bash[34817]: audit 2026-04-16T19:22:18.549861+0000 mon.vm01 (mon.0) 460 : audit [DBG] from='client.? 
192.168.123.101:0/1871328100' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:19 vm04 bash[34817]: audit 2026-04-16T19:22:18.995292+0000 mon.vm01 (mon.0) 461 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-16T19:22:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:19 vm04 bash[34817]: audit 2026-04-16T19:22:18.995292+0000 mon.vm01 (mon.0) 461 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-16T19:22:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:19 vm04 bash[34817]: audit 2026-04-16T19:22:18.996379+0000 mon.vm01 (mon.0) 462 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:19 vm04 bash[34817]: audit 2026-04-16T19:22:18.996379+0000 mon.vm01 (mon.0) 462 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:19.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:19 vm01 bash[28222]: audit 2026-04-16T19:22:18.549861+0000 mon.vm01 (mon.0) 460 : audit [DBG] from='client.? 192.168.123.101:0/1871328100' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:19.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:19 vm01 bash[28222]: audit 2026-04-16T19:22:18.549861+0000 mon.vm01 (mon.0) 460 : audit [DBG] from='client.? 
192.168.123.101:0/1871328100' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:19.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:19 vm01 bash[28222]: audit 2026-04-16T19:22:18.995292+0000 mon.vm01 (mon.0) 461 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-16T19:22:19.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:19 vm01 bash[28222]: audit 2026-04-16T19:22:18.995292+0000 mon.vm01 (mon.0) 461 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-16T19:22:19.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:19 vm01 bash[28222]: audit 2026-04-16T19:22:18.996379+0000 mon.vm01 (mon.0) 462 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:19.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:19 vm01 bash[28222]: audit 2026-04-16T19:22:18.996379+0000 mon.vm01 (mon.0) 462 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:19.639 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:19.933 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:20.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: cephadm 2026-04-16T19:22:18.997040+0000 mgr.vm01.nwhpas (mgr.14227) 72 : cephadm [INF] Deploying daemon osd.0 on vm04 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: cephadm 2026-04-16T19:22:18.997040+0000 mgr.vm01.nwhpas (mgr.14227) 72 : cephadm [INF] Deploying daemon osd.0 on vm04 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: cluster 2026-04-16T19:22:19.260364+0000 mgr.vm01.nwhpas (mgr.14227) 73 : cluster [DBG] pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: cluster 2026-04-16T19:22:19.260364+0000 mgr.vm01.nwhpas (mgr.14227) 73 : cluster [DBG] pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: audit 2026-04-16T19:22:19.275204+0000 mon.vm01 (mon.0) 463 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: audit 2026-04-16T19:22:19.275204+0000 mon.vm01 (mon.0) 463 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: audit 2026-04-16T19:22:19.275839+0000 mon.vm01 (mon.0) 464 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: audit 2026-04-16T19:22:19.275839+0000 mon.vm01 (mon.0) 464 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: cephadm 2026-04-16T19:22:19.276311+0000 mgr.vm01.nwhpas (mgr.14227) 74 : cephadm [INF] Deploying daemon osd.1 on vm01 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 bash[34817]: cephadm 2026-04-16T19:22:19.276311+0000 mgr.vm01.nwhpas (mgr.14227) 74 : cephadm [INF] Deploying daemon osd.1 on vm01 2026-04-16T19:22:20.213 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:22:20.359 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: cephadm 2026-04-16T19:22:18.997040+0000 mgr.vm01.nwhpas (mgr.14227) 72 : cephadm [INF] Deploying daemon osd.0 on vm04 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: cephadm 2026-04-16T19:22:18.997040+0000 mgr.vm01.nwhpas (mgr.14227) 72 : cephadm [INF] Deploying daemon osd.0 on vm04 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: cluster 2026-04-16T19:22:19.260364+0000 mgr.vm01.nwhpas (mgr.14227) 73 : cluster [DBG] pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: cluster 2026-04-16T19:22:19.260364+0000 mgr.vm01.nwhpas (mgr.14227) 73 : cluster [DBG] pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: audit 2026-04-16T19:22:19.275204+0000 mon.vm01 (mon.0) 463 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: audit 2026-04-16T19:22:19.275204+0000 mon.vm01 (mon.0) 463 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: audit 2026-04-16T19:22:19.275839+0000 mon.vm01 (mon.0) 464 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: audit 2026-04-16T19:22:19.275839+0000 mon.vm01 (mon.0) 464 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: 
cephadm 2026-04-16T19:22:19.276311+0000 mgr.vm01.nwhpas (mgr.14227) 74 : cephadm [INF] Deploying daemon osd.1 on vm01 2026-04-16T19:22:20.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 bash[28222]: cephadm 2026-04-16T19:22:19.276311+0000 mgr.vm01.nwhpas (mgr.14227) 74 : cephadm [INF] Deploying daemon osd.1 on vm01 2026-04-16T19:22:20.539 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:20 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:22:20.750 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0} 2026-04-16T19:22:20.770 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:22:21.002 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:20 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.356233+0000 mon.vm01 (mon.0) 465 : audit [DBG] from='client.? 192.168.123.101:0/3263102158' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.356233+0000 mon.vm01 (mon.0) 465 : audit [DBG] from='client.? 
192.168.123.101:0/3263102158' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.692018+0000 mon.vm01 (mon.0) 466 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.692018+0000 mon.vm01 (mon.0) 466 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.712914+0000 mon.vm01 (mon.0) 467 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.712914+0000 mon.vm01 (mon.0) 467 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.713762+0000 mon.vm01 (mon.0) 468 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.713762+0000 mon.vm01 (mon.0) 468 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.714623+0000 mon.vm01 (mon.0) 469 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.714623+0000 mon.vm01 (mon.0) 469 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: cephadm 2026-04-16T19:22:20.715253+0000 mgr.vm01.nwhpas (mgr.14227) 75 : cephadm [INF] Deploying daemon osd.2 on vm04 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: cephadm 2026-04-16T19:22:20.715253+0000 mgr.vm01.nwhpas (mgr.14227) 75 : cephadm [INF] Deploying daemon osd.2 on vm04 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.927206+0000 mon.vm01 (mon.0) 470 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.927206+0000 mon.vm01 (mon.0) 470 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.934962+0000 mon.vm01 (mon.0) 471 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 
bash[34817]: audit 2026-04-16T19:22:20.934962+0000 mon.vm01 (mon.0) 471 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.936740+0000 mon.vm01 (mon.0) 472 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.936740+0000 mon.vm01 (mon.0) 472 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.938198+0000 mon.vm01 (mon.0) 473 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.328 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:21 vm04 bash[34817]: audit 2026-04-16T19:22:20.938198+0000 mon.vm01 (mon.0) 473 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.375 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.356233+0000 mon.vm01 (mon.0) 465 : audit [DBG] from='client.? 192.168.123.101:0/3263102158' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.356233+0000 mon.vm01 (mon.0) 465 : audit [DBG] from='client.? 
192.168.123.101:0/3263102158' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.692018+0000 mon.vm01 (mon.0) 466 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.692018+0000 mon.vm01 (mon.0) 466 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.712914+0000 mon.vm01 (mon.0) 467 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.712914+0000 mon.vm01 (mon.0) 467 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.713762+0000 mon.vm01 (mon.0) 468 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.713762+0000 mon.vm01 (mon.0) 468 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.714623+0000 mon.vm01 (mon.0) 469 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.714623+0000 mon.vm01 (mon.0) 469 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: cephadm 2026-04-16T19:22:20.715253+0000 mgr.vm01.nwhpas (mgr.14227) 75 : cephadm [INF] Deploying daemon osd.2 on vm04 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: cephadm 2026-04-16T19:22:20.715253+0000 mgr.vm01.nwhpas (mgr.14227) 75 : cephadm [INF] Deploying daemon osd.2 on vm04 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.927206+0000 mon.vm01 (mon.0) 470 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.927206+0000 mon.vm01 (mon.0) 470 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.934962+0000 mon.vm01 (mon.0) 471 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 
bash[28222]: audit 2026-04-16T19:22:20.934962+0000 mon.vm01 (mon.0) 471 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.936740+0000 mon.vm01 (mon.0) 472 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.936740+0000 mon.vm01 (mon.0) 472 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.938198+0000 mon.vm01 (mon.0) 473 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.376 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:21 vm01 bash[28222]: audit 2026-04-16T19:22:20.938198+0000 mon.vm01 (mon.0) 473 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:21.751 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:22.074 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:22.310 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:22 vm01 bash[28222]: cephadm 2026-04-16T19:22:20.939051+0000 mgr.vm01.nwhpas (mgr.14227) 76 : cephadm [INF] Deploying daemon osd.3 on vm01 2026-04-16T19:22:22.310 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:22 vm01 bash[28222]: cephadm 2026-04-16T19:22:20.939051+0000 mgr.vm01.nwhpas (mgr.14227) 76 : cephadm [INF] Deploying daemon osd.3 on vm01 2026-04-16T19:22:22.310 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:22 vm01 bash[28222]: cluster 2026-04-16T19:22:21.260540+0000 mgr.vm01.nwhpas (mgr.14227) 77 : cluster [DBG] pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:22.310 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:22 vm01 bash[28222]: cluster 2026-04-16T19:22:21.260540+0000 mgr.vm01.nwhpas (mgr.14227) 77 : cluster [DBG] pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:22.361 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:22 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
2026-04-16T19:22:22.361 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:22 vm04 bash[34817]: cephadm 2026-04-16T19:22:20.939051+0000 mgr.vm01.nwhpas (mgr.14227) 76 : cephadm [INF] Deploying daemon osd.3 on vm01
2026-04-16T19:22:22.361 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:22 vm04 bash[34817]: cluster 2026-04-16T19:22:21.260540+0000 mgr.vm01.nwhpas (mgr.14227) 77 : cluster [DBG] pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:22.560 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:22 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:22.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:22 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:22.901 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:22 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:23.027 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:23.179 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0}
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.507054+0000 mon.vm01 (mon.0) 474 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.513505+0000 mon.vm01 (mon.0) 475 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.514382+0000 mon.vm01 (mon.0) 476 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.515332+0000 mon.vm01 (mon.0) 477 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: cephadm 2026-04-16T19:22:22.517746+0000 mgr.vm01.nwhpas (mgr.14227) 78 : cephadm [INF] Deploying daemon osd.4 on vm04
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.558312+0000 mon.vm01 (mon.0) 478 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.690767+0000 mon.vm01 (mon.0) 479 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.682 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.698531+0000 mon.vm01 (mon.0) 480 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.683 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.704374+0000 mon.vm01 (mon.0) 481 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch
2026-04-16T19:22:23.683 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:22.706058+0000 mon.vm01 (mon.0) 482 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:23.683 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: cephadm 2026-04-16T19:22:22.706696+0000 mgr.vm01.nwhpas (mgr.14227) 79 : cephadm [INF] Deploying daemon osd.5 on vm01
2026-04-16T19:22:23.683 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:23 vm04 bash[34817]: audit 2026-04-16T19:22:23.025583+0000 mon.vm01 (mon.0) 483 : audit [DBG] from='client.? 192.168.123.101:0/4123872000' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:23.701 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.507054+0000 mon.vm01 (mon.0) 474 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.701 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.513505+0000 mon.vm01 (mon.0) 475 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.514382+0000 mon.vm01 (mon.0) 476 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.515332+0000 mon.vm01 (mon.0) 477 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: cephadm 2026-04-16T19:22:22.517746+0000 mgr.vm01.nwhpas (mgr.14227) 78 : cephadm [INF] Deploying daemon osd.4 on vm04
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.558312+0000 mon.vm01 (mon.0) 478 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.690767+0000 mon.vm01 (mon.0) 479 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.698531+0000 mon.vm01 (mon.0) 480 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.704374+0000 mon.vm01 (mon.0) 481 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:22.706058+0000 mon.vm01 (mon.0) 482 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: cephadm 2026-04-16T19:22:22.706696+0000 mgr.vm01.nwhpas (mgr.14227) 79 : cephadm [INF] Deploying daemon osd.5 on vm01
2026-04-16T19:22:23.702 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:23 vm01 bash[28222]: audit 2026-04-16T19:22:23.025583+0000 mon.vm01 (mon.0) 483 : audit [DBG] from='client.? 192.168.123.101:0/4123872000' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:24.180 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:24.306 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:24.439 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:24.521 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:24.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:24.914 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:24.915 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 bash[28222]: cluster 2026-04-16T19:22:23.260766+0000 mgr.vm01.nwhpas (mgr.14227) 80 : cluster [DBG] pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:24.915 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 bash[28222]: audit 2026-04-16T19:22:24.479622+0000 mon.vm01 (mon.0) 484 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:24.915 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 bash[28222]: audit 2026-04-16T19:22:24.488243+0000 mon.vm01 (mon.0) 485 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:24.915 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 bash[28222]: audit 2026-04-16T19:22:24.500221+0000 mon.vm01 (mon.0) 486 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch
2026-04-16T19:22:24.915 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:24 vm01 bash[28222]: audit 2026-04-16T19:22:24.501033+0000 mon.vm01 (mon.0) 487 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:24.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 bash[34817]: cluster 2026-04-16T19:22:23.260766+0000 mgr.vm01.nwhpas (mgr.14227) 80 : cluster [DBG] pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:24.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 bash[34817]: audit 2026-04-16T19:22:24.479622+0000 mon.vm01 (mon.0) 484 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:24.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 bash[34817]: audit 2026-04-16T19:22:24.488243+0000 mon.vm01 (mon.0) 485 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:24.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 bash[34817]: audit 2026-04-16T19:22:24.500221+0000 mon.vm01 (mon.0) 486 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch
2026-04-16T19:22:24.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:24 vm04 bash[34817]: audit 2026-04-16T19:22:24.501033+0000 mon.vm01 (mon.0) 487 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:25.155 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:25.287 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0}
2026-04-16T19:22:25.795 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: cephadm 2026-04-16T19:22:24.501575+0000 mgr.vm01.nwhpas (mgr.14227) 81 : cephadm [INF] Deploying daemon osd.6 on vm04
2026-04-16T19:22:25.795 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.736768+0000 mon.vm04 (mon.1) 10 : audit [INF] from='osd.0 [v2:192.168.123.104:6800/2796237597,v1:192.168.123.104:6801/2796237597]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-16T19:22:25.795 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.738100+0000 mon.vm01 (mon.0) 488 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-16T19:22:25.795 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.761371+0000 mon.vm01 (mon.0) 489 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:25.795 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.777768+0000 mon.vm01 (mon.0) 490 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:25.795 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.779567+0000 mon.vm01 (mon.0) 491 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch
2026-04-16T19:22:25.796 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.781242+0000 mon.vm01 (mon.0) 492 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:25.796 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: cephadm 2026-04-16T19:22:24.781826+0000 mgr.vm01.nwhpas (mgr.14227) 82 : cephadm [INF] Deploying daemon osd.7 on vm01
2026-04-16T19:22:25.796 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:24.921791+0000 mon.vm01 (mon.0) 493 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]} : dispatch
2026-04-16T19:22:25.796 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 bash[34817]: audit 2026-04-16T19:22:25.153913+0000 mon.vm01 (mon.0) 494 : audit [DBG] from='client.? 192.168.123.101:0/802400459' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:25.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: cephadm 2026-04-16T19:22:24.501575+0000 mgr.vm01.nwhpas (mgr.14227) 81 : cephadm [INF] Deploying daemon osd.6 on vm04
2026-04-16T19:22:25.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.736768+0000 mon.vm04 (mon.1) 10 : audit [INF] from='osd.0 [v2:192.168.123.104:6800/2796237597,v1:192.168.123.104:6801/2796237597]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-16T19:22:25.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.738100+0000 mon.vm01 (mon.0) 488 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.761371+0000 mon.vm01 (mon.0) 489 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.777768+0000 mon.vm01 (mon.0) 490 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.779567+0000 mon.vm01 (mon.0) 491 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.781242+0000 mon.vm01 (mon.0) 492 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: cephadm 2026-04-16T19:22:24.781826+0000 mgr.vm01.nwhpas (mgr.14227) 82 : cephadm [INF] Deploying daemon osd.7 on vm01
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:24.921791+0000 mon.vm01 (mon.0) 493 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]} : dispatch
2026-04-16T19:22:25.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:25 vm01 bash[28222]: audit 2026-04-16T19:22:25.153913+0000 mon.vm01 (mon.0) 494 : audit [DBG] from='client.? 192.168.123.101:0/802400459' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:26.089 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:25 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:26.288 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:26.390 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:26.589 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:26.638 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:26.917 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: cluster 2026-04-16T19:22:25.261018+0000 mgr.vm01.nwhpas (mgr.14227) 83 : cluster [DBG] pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.714878+0000 mon.vm01 (mon.0) 495 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]}]': finished
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.714943+0000 mon.vm01 (mon.0) 496 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]}]': finished
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: cluster 2026-04-16T19:22:25.717843+0000 mon.vm01 (mon.0) 497 : cluster [DBG] osdmap e14: 8 total, 0 up, 8 in
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.719231+0000 mon.vm01 (mon.0) 498 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.719523+0000 mon.vm01 (mon.0) 499 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.719661+0000 mon.vm01 (mon.0) 500 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.719785+0000 mon.vm01 (mon.0) 501 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.719918+0000 mon.vm01 (mon.0) 502 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.720031+0000 mon.vm01 (mon.0) 503 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.720125+0000 mon.vm01 (mon.0) 504 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.720217+0000 mon.vm01 (mon.0) 505 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.720301+0000 mon.vm01 (mon.0) 506 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.723087+0000 mon.vm04 (mon.1) 11 : audit [INF] from='osd.0 [v2:192.168.123.104:6800/2796237597,v1:192.168.123.104:6801/2796237597]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:25.732787+0000 mon.vm01 (mon.0) 507 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
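
Each booting OSD registers itself above with "osd crush create-or-move" at a weight of 0.0195. CRUSH weights are conventionally the device capacity in TiB, so 0.0195 works out to roughly a 20 GiB device (that these scratch LVs are ~20 GiB is an inference about this run, but the arithmetic is direct):

    # CRUSH weight = capacity in TiB: 20 GiB / 1024 GiB-per-TiB ≈ 0.0195,
    # matching the "weight":0.0195 in the audit records above.
    awk 'BEGIN { printf "%.4f\n", 20 / 1024 }'   # -> 0.0195
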
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:26.293696+0000 mon.vm01 (mon.0) 508 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]} : dispatch
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:26.401510+0000 mon.vm01 (mon.0) 509 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:26.918 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:26 vm01 bash[28222]: audit 2026-04-16T19:22:26.412581+0000 mon.vm01 (mon.0) 510 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:26.999 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: cluster 2026-04-16T19:22:25.261018+0000 mgr.vm01.nwhpas (mgr.14227) 83 : cluster [DBG] pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.714878+0000 mon.vm01 (mon.0) 495 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]}]': finished
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.714943+0000 mon.vm01 (mon.0) 496 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]}]': finished
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: cluster 2026-04-16T19:22:25.717843+0000 mon.vm01 (mon.0) 497 : cluster [DBG] osdmap e14: 8 total, 0 up, 8 in
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.719231+0000 mon.vm01 (mon.0) 498 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.719523+0000 mon.vm01 (mon.0) 499 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.719661+0000 mon.vm01 (mon.0) 500 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.719785+0000 mon.vm01 (mon.0) 501 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.719918+0000 mon.vm01 (mon.0) 502 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.720031+0000 mon.vm01 (mon.0) 503 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.720125+0000 mon.vm01 (mon.0) 504 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.720217+0000 mon.vm01 (mon.0) 505 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.720301+0000 mon.vm01 (mon.0) 506 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.723087+0000 mon.vm04 (mon.1) 11 : audit [INF] from='osd.0 [v2:192.168.123.104:6800/2796237597,v1:192.168.123.104:6801/2796237597]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:25.732787+0000 mon.vm01 (mon.0) 507 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:26.293696+0000 mon.vm01 (mon.0) 508 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]} : dispatch
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:26.401510+0000 mon.vm01 (mon.0) 509 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:27.000 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:26 vm04 bash[34817]: audit 2026-04-16T19:22:26.412581+0000 mon.vm01 (mon.0) 510 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:27.176 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:27.284 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":15,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0}
2026-04-16T19:22:28.070 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.727593+0000 mon.vm01 (mon.0) 511 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-16T19:22:28.070 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.727887+0000 mon.vm01 (mon.0) 512 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished
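
The "ceph osd stat -f json" calls interleaved here are teuthology polling the cluster while the OSDs come up; at this point the JSON still reports num_up_osds 0 of num_osds 8. A minimal sketch of that kind of wait loop, reusing the exact cephadm invocation from the log (the jq parsing and 5-second interval are illustrative assumptions; teuthology itself consumes the JSON in Python):

    # Poll "ceph osd stat -f json" until every registered OSD reports up.
    while true; do
      stat=$(sudo /home/ubuntu/cephtest/cephadm \
        --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json)
      up=$(jq -r '.num_up_osds' <<<"$stat")
      total=$(jq -r '.num_osds' <<<"$stat")
      [ "$total" -gt 0 ] && [ "$up" -eq "$total" ] && break
      sleep 5
    done
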
2026-04-16T19:22:28.070 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.727926+0000 mon.vm01 (mon.0) 513 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]}]': finished
2026-04-16T19:22:28.070 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: cluster 2026-04-16T19:22:26.730291+0000 mon.vm01 (mon.0) 514 : cluster [DBG] osdmap e15: 8 total, 0 up, 8 in
2026-04-16T19:22:28.070 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.733599+0000 mon.vm01 (mon.0) 515 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.733765+0000 mon.vm01 (mon.0) 516 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734232+0000 mon.vm01 (mon.0) 517 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734333+0000 mon.vm01 (mon.0) 518 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734424+0000 mon.vm01 (mon.0) 519 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734501+0000 mon.vm01 (mon.0) 520 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734589+0000 mon.vm01 (mon.0) 521 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734735+0000 mon.vm01 (mon.0) 522 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.734827+0000 mon.vm01 (mon.0) 523 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.744392+0000 mon.vm01 (mon.0) 524 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.769969+0000 mon.vm04 (mon.1) 12 : audit [INF] from='osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.770757+0000 mon.vm01 (mon.0) 525 : audit [INF] from='osd.3 ' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.786217+0000 mon.vm01 (mon.0) 526 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:26.792756+0000 mon.vm01 (mon.0) 527 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:27.175401+0000 mon.vm04 (mon.1) 13 : audit [DBG] from='client.? 192.168.123.101:0/969282112' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:27.732821+0000 mon.vm01 (mon.0) 528 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:27.732918+0000 mon.vm01 (mon.0) 529 : audit [INF] from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]}]': finished
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: cluster 2026-04-16T19:22:27.738542+0000 mon.vm01 (mon.0) 530 : cluster [INF] osd.0 [v2:192.168.123.104:6800/2796237597,v1:192.168.123.104:6801/2796237597] boot
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: cluster 2026-04-16T19:22:27.738603+0000 mon.vm01 (mon.0) 531 : cluster [INF] osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917] boot
2026-04-16T19:22:28.071 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: cluster 2026-04-16T19:22:27.738633+0000 mon.vm01 (mon.0) 532 : cluster [DBG] osdmap e16: 8 total, 2 up, 8 in
2026-04-16T19:22:28.072 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:27 vm01 bash[28222]: audit 2026-04-16T19:22:27.739589+0000 mon.vm01 (mon.0) 533 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
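
Between osdmap e14 and e16 each booting OSD issues the same pair of mon commands seen in these audit records: "osd crush set-device-class" and "osd crush create-or-move". Typed by hand against this cluster the CLI equivalents would look like the following (illustrative only; in this run the OSDs send them internally at startup, using osd.3's values from the records above):

    # CLI form of the audited mon commands.
    ceph osd crush set-device-class ssd osd.3
    ceph osd crush create-or-move osd.3 0.0195 host=vm01 root=default
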
2026-04-16T19:22:28.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.727593+0000 mon.vm01 (mon.0) 511 : audit [INF] from='osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.727887+0000 mon.vm01 (mon.0) 512 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.727926+0000 mon.vm01 (mon.0) 513 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]}]': finished
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: cluster 2026-04-16T19:22:26.730291+0000 mon.vm01 (mon.0) 514 : cluster [DBG] osdmap e15: 8 total, 0 up, 8 in
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.733599+0000 mon.vm01 (mon.0) 515 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.733765+0000 mon.vm01 (mon.0) 516 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734232+0000 mon.vm01 (mon.0) 517 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734333+0000 mon.vm01 (mon.0) 518 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734424+0000 mon.vm01 (mon.0) 519 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734501+0000 mon.vm01 (mon.0) 520 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734589+0000 mon.vm01 (mon.0) 521 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734735+0000 mon.vm01 (mon.0) 522 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.734827+0000 mon.vm01 (mon.0) 523 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.744392+0000 mon.vm01 (mon.0) 524 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.769969+0000 mon.vm04 (mon.1) 12 : audit [INF] from='osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.770757+0000 mon.vm01 (mon.0) 525 : audit [INF] from='osd.3 ' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.786217+0000 mon.vm01 (mon.0) 526 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:26.792756+0000 mon.vm01 (mon.0) 527 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:27.175401+0000 mon.vm04 (mon.1) 13 : audit [DBG] from='client.? 192.168.123.101:0/969282112' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:27.732821+0000 mon.vm01 (mon.0) 528 : audit [INF] from='osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:27.732918+0000 mon.vm01 (mon.0) 529 : audit [INF] from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]}]': finished
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: cluster 2026-04-16T19:22:27.738542+0000 mon.vm01 (mon.0) 530 : cluster [INF] osd.0 [v2:192.168.123.104:6800/2796237597,v1:192.168.123.104:6801/2796237597] boot
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: cluster 2026-04-16T19:22:27.738603+0000 mon.vm01 (mon.0) 531 : cluster [INF] osd.1 [v2:192.168.123.101:6802/4232106917,v1:192.168.123.101:6803/4232106917] boot
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: cluster 2026-04-16T19:22:27.738633+0000 mon.vm01 (mon.0) 532 : cluster [DBG] osdmap e16: 8 total, 2 up, 8 in
2026-04-16T19:22:28.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:27 vm04 bash[34817]: audit 2026-04-16T19:22:27.739589+0000 mon.vm01 (mon.0) 533 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-16T19:22:28.285 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:28.667 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:28.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: cluster 2026-04-16T19:22:25.745920+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-16T19:22:28.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: cluster 2026-04-16T19:22:25.745932+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-16T19:22:28.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: cluster 2026-04-16T19:22:25.966462+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-16T19:22:28.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: cluster 2026-04-16T19:22:25.966478+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-16T19:22:28.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: cluster 2026-04-16T19:22:27.263494+0000 mgr.vm01.nwhpas (mgr.14227) 84 : cluster [DBG] pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
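
Every ceph command in this run goes through "cephadm shell", which starts a container from the pinned image and infers the admin config on the way in (the "Inferring config" stderr line above). A hypothetical convenience wrapper for reproducing these calls interactively on vm01 (the function name is invented; image, fsid, and paths are the ones this log uses):

    # ceph_shell: hypothetical helper wrapping the exact invocation teuthology uses.
    ceph_shell() {
      sudo /home/ubuntu/cephtest/cephadm \
        --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- "$@"
    }
    ceph_shell ceph osd stat -f json   # same call as the DEBUG line above
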
cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741189+0000 mon.vm01 (mon.0) 534 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741309+0000 mon.vm01 (mon.0) 535 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741309+0000 mon.vm01 (mon.0) 535 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741365+0000 mon.vm01 (mon.0) 536 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741365+0000 mon.vm01 (mon.0) 536 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741408+0000 mon.vm01 (mon.0) 537 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741408+0000 mon.vm01 (mon.0) 537 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741452+0000 mon.vm01 (mon.0) 538 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741452+0000 mon.vm01 (mon.0) 538 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741600+0000 mon.vm01 (mon.0) 539 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741600+0000 mon.vm01 (mon.0) 539 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741637+0000 mon.vm01 (mon.0) 540 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": 
"osd metadata", "id": 7} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.741637+0000 mon.vm01 (mon.0) 540 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.742637+0000 mon.vm04 (mon.1) 14 : audit [INF] from='osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.742637+0000 mon.vm04 (mon.1) 14 : audit [INF] from='osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.757077+0000 mon.vm01 (mon.0) 541 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.757077+0000 mon.vm01 (mon.0) 541 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.757325+0000 mon.vm01 (mon.0) 542 : audit [INF] from='osd.3 ' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:27.757325+0000 mon.vm01 (mon.0) 542 : audit [INF] from='osd.3 ' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:28.430217+0000 mon.vm04 (mon.1) 15 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:28.430217+0000 mon.vm04 (mon.1) 15 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:28.431177+0000 mon.vm01 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:28.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:28 vm01 bash[28222]: audit 2026-04-16T19:22:28.431177+0000 mon.vm01 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' 
cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:29.061 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.745920+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.745920+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.745932+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.745932+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.966462+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.966462+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.966478+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:25.966478+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:27.263494+0000 mgr.vm01.nwhpas (mgr.14227) 84 : cluster [DBG] pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: cluster 2026-04-16T19:22:27.263494+0000 mgr.vm01.nwhpas (mgr.14227) 84 : cluster [DBG] pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741189+0000 mon.vm01 (mon.0) 534 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741189+0000 mon.vm01 (mon.0) 534 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741309+0000 mon.vm01 (mon.0) 535 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741309+0000 mon.vm01 (mon.0) 535 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741365+0000 mon.vm01 (mon.0) 536 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' 
entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741365+0000 mon.vm01 (mon.0) 536 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741408+0000 mon.vm01 (mon.0) 537 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741408+0000 mon.vm01 (mon.0) 537 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741452+0000 mon.vm01 (mon.0) 538 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741452+0000 mon.vm01 (mon.0) 538 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741600+0000 mon.vm01 (mon.0) 539 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741600+0000 mon.vm01 (mon.0) 539 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741637+0000 mon.vm01 (mon.0) 540 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.741637+0000 mon.vm01 (mon.0) 540 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.742637+0000 mon.vm04 (mon.1) 14 : audit [INF] from='osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.742637+0000 mon.vm04 (mon.1) 14 : audit [INF] from='osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:29.062 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.757077+0000 mon.vm01 (mon.0) 541 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.757077+0000 mon.vm01 (mon.0) 541 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.757325+0000 mon.vm01 (mon.0) 542 : audit [INF] from='osd.3 ' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:27.757325+0000 mon.vm01 (mon.0) 542 : audit [INF] from='osd.3 ' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:28.430217+0000 mon.vm04 (mon.1) 15 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:28.430217+0000 mon.vm04 (mon.1) 15 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:28.431177+0000 mon.vm01 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:29.062 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:28 vm04 bash[34817]: audit 2026-04-16T19:22:28.431177+0000 mon.vm01 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-16T19:22:29.251 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:29.338 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":17,"num_osds":8,"num_up_osds":3,"osd_up_since":1776367348,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0} 2026-04-16T19:22:29.853 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.308745+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.308745+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.308758+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.308758+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok 
2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.797865+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.797865+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.797884+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:27.797884+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.747212+0000 mon.vm01 (mon.0) 544 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.747212+0000 mon.vm01 (mon.0) 544 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.750016+0000 mon.vm01 (mon.0) 545 : audit [INF] from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.750016+0000 mon.vm01 (mon.0) 545 : audit [INF] from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.750088+0000 mon.vm01 (mon.0) 546 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.750088+0000 mon.vm01 (mon.0) 546 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.754990+0000 mon.vm04 (mon.1) 16 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.754990+0000 mon.vm04 (mon.1) 16 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:28.757496+0000 mon.vm01 (mon.0) 547 : 
cluster [INF] osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774] boot 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:28.757496+0000 mon.vm01 (mon.0) 547 : cluster [INF] osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774] boot 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:28.757530+0000 mon.vm01 (mon.0) 548 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:28.757530+0000 mon.vm01 (mon.0) 548 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.758624+0000 mon.vm01 (mon.0) 549 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.758624+0000 mon.vm01 (mon.0) 549 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.758862+0000 mon.vm01 (mon.0) 550 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.758862+0000 mon.vm01 (mon.0) 550 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759185+0000 mon.vm01 (mon.0) 551 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759185+0000 mon.vm01 (mon.0) 551 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759314+0000 mon.vm01 (mon.0) 552 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759314+0000 mon.vm01 (mon.0) 552 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759624+0000 mon.vm01 (mon.0) 553 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 
19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759624+0000 mon.vm01 (mon.0) 553 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759883+0000 mon.vm01 (mon.0) 554 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759883+0000 mon.vm01 (mon.0) 554 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759925+0000 mon.vm01 (mon.0) 555 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.759925+0000 mon.vm01 (mon.0) 555 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.769374+0000 mon.vm01 (mon.0) 556 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.769374+0000 mon.vm01 (mon.0) 556 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.781231+0000 mon.vm01 (mon.0) 557 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:28.781231+0000 mon.vm01 (mon.0) 557 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.250878+0000 mon.vm01 (mon.0) 558 : audit [DBG] from='client.? 192.168.123.101:0/3892748094' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.250878+0000 mon.vm01 (mon.0) 558 : audit [DBG] from='client.? 
192.168.123.101:0/3892748094' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.437656+0000 mon.vm01 (mon.0) 559 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.437656+0000 mon.vm01 (mon.0) 559 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.854 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.443759+0000 mon.vm01 (mon.0) 560 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.443759+0000 mon.vm01 (mon.0) 560 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.554880+0000 mon.vm01 (mon.0) 561 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.554880+0000 mon.vm01 (mon.0) 561 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.754404+0000 mon.vm01 (mon.0) 562 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.754404+0000 mon.vm01 (mon.0) 562 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.754474+0000 mon.vm01 (mon.0) 563 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]}]': finished 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.754474+0000 mon.vm01 (mon.0) 563 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]}]': finished 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.754531+0000 mon.vm01 (mon.0) 564 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pool create", "format": 
"json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: audit 2026-04-16T19:22:29.754531+0000 mon.vm01 (mon.0) 564 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:29.759955+0000 mon.vm01 (mon.0) 565 : cluster [INF] osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932] boot 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:29.759955+0000 mon.vm01 (mon.0) 565 : cluster [INF] osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932] boot 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:29.760887+0000 mon.vm01 (mon.0) 566 : cluster [DBG] osdmap e18: 8 total, 4 up, 8 in 2026-04-16T19:22:29.855 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:29 vm01 bash[28222]: cluster 2026-04-16T19:22:29.760887+0000 mon.vm01 (mon.0) 566 : cluster [DBG] osdmap e18: 8 total, 4 up, 8 in 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.308745+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.308745+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.308758+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.308758+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.797865+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.797865+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.797884+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:27.797884+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.747212+0000 mon.vm01 (mon.0) 544 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.747212+0000 mon.vm01 (mon.0) 544 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' 
entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.750016+0000 mon.vm01 (mon.0) 545 : audit [INF] from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.750016+0000 mon.vm01 (mon.0) 545 : audit [INF] from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.750088+0000 mon.vm01 (mon.0) 546 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.750088+0000 mon.vm01 (mon.0) 546 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.754990+0000 mon.vm04 (mon.1) 16 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.754990+0000 mon.vm04 (mon.1) 16 : audit [INF] from='osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:28.757496+0000 mon.vm01 (mon.0) 547 : cluster [INF] osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774] boot 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:28.757496+0000 mon.vm01 (mon.0) 547 : cluster [INF] osd.2 [v2:192.168.123.104:6808/1205758774,v1:192.168.123.104:6809/1205758774] boot 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:28.757530+0000 mon.vm01 (mon.0) 548 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:28.757530+0000 mon.vm01 (mon.0) 548 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.758624+0000 mon.vm01 (mon.0) 549 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.758624+0000 mon.vm01 (mon.0) 549 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' 
entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.758862+0000 mon.vm01 (mon.0) 550 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.758862+0000 mon.vm01 (mon.0) 550 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759185+0000 mon.vm01 (mon.0) 551 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759185+0000 mon.vm01 (mon.0) 551 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759314+0000 mon.vm01 (mon.0) 552 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759314+0000 mon.vm01 (mon.0) 552 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759624+0000 mon.vm01 (mon.0) 553 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759624+0000 mon.vm01 (mon.0) 553 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759883+0000 mon.vm01 (mon.0) 554 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759883+0000 mon.vm01 (mon.0) 554 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759925+0000 mon.vm01 (mon.0) 555 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:29.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.759925+0000 mon.vm01 (mon.0) 555 : audit [DBG] 
from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.769374+0000 mon.vm01 (mon.0) 556 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.769374+0000 mon.vm01 (mon.0) 556 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.781231+0000 mon.vm01 (mon.0) 557 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:28.781231+0000 mon.vm01 (mon.0) 557 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.250878+0000 mon.vm01 (mon.0) 558 : audit [DBG] from='client.? 192.168.123.101:0/3892748094' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.250878+0000 mon.vm01 (mon.0) 558 : audit [DBG] from='client.? 
192.168.123.101:0/3892748094' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.437656+0000 mon.vm01 (mon.0) 559 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.437656+0000 mon.vm01 (mon.0) 559 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.443759+0000 mon.vm01 (mon.0) 560 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.443759+0000 mon.vm01 (mon.0) 560 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.554880+0000 mon.vm01 (mon.0) 561 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.554880+0000 mon.vm01 (mon.0) 561 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.754404+0000 mon.vm01 (mon.0) 562 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.754404+0000 mon.vm01 (mon.0) 562 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.754474+0000 mon.vm01 (mon.0) 563 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]}]': finished 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.754474+0000 mon.vm01 (mon.0) 563 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]}]': finished 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.754531+0000 mon.vm01 (mon.0) 564 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pool create", "format": 
"json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: audit 2026-04-16T19:22:29.754531+0000 mon.vm01 (mon.0) 564 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:29.759955+0000 mon.vm01 (mon.0) 565 : cluster [INF] osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932] boot 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:29.759955+0000 mon.vm01 (mon.0) 565 : cluster [INF] osd.3 [v2:192.168.123.101:6810/1926756932,v1:192.168.123.101:6811/1926756932] boot 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:29.760887+0000 mon.vm01 (mon.0) 566 : cluster [DBG] osdmap e18: 8 total, 4 up, 8 in 2026-04-16T19:22:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:29 vm04 bash[34817]: cluster 2026-04-16T19:22:29.760887+0000 mon.vm01 (mon.0) 566 : cluster [DBG] osdmap e18: 8 total, 4 up, 8 in 2026-04-16T19:22:30.340 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json 2026-04-16T19:22:30.638 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:30.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: cluster 2026-04-16T19:22:29.263693+0000 mgr.vm01.nwhpas (mgr.14227) 85 : cluster [DBG] pgmap v31: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-04-16T19:22:30.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: cluster 2026-04-16T19:22:29.263693+0000 mgr.vm01.nwhpas (mgr.14227) 85 : cluster [DBG] pgmap v31: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-04-16T19:22:30.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.762418+0000 mon.vm01 (mon.0) 567 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:30.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.762418+0000 mon.vm01 (mon.0) 567 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:30.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763048+0000 mon.vm01 (mon.0) 568 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:30.959 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763048+0000 mon.vm01 (mon.0) 568 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763271+0000 mon.vm01 (mon.0) 569 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763271+0000 mon.vm01 (mon.0) 569 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763447+0000 mon.vm01 (mon.0) 570 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763447+0000 mon.vm01 (mon.0) 570 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763546+0000 mon.vm01 (mon.0) 571 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763546+0000 mon.vm01 (mon.0) 571 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763652+0000 mon.vm01 (mon.0) 572 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.763652+0000 mon.vm01 (mon.0) 572 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.764752+0000 mon.vm01 (mon.0) 573 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.764752+0000 mon.vm01 (mon.0) 573 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch 2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.948936+0000 mon.vm01 (mon.0) 574 : 
audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.954340+0000 mon.vm01 (mon.0) 575 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:29.969611+0000 mon.vm01 (mon.0) 576 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.264251+0000 mon.vm04 (mon.1) 17 : audit [INF] from='osd.6 [v2:192.168.123.104:6824/3179997190,v1:192.168.123.104:6825/3179997190]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.265622+0000 mon.vm01 (mon.0) 577 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.758655+0000 mon.vm01 (mon.0) 578 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.759048+0000 mon.vm01 (mon.0) 579 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.759110+0000 mon.vm01 (mon.0) 580 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]}]': finished
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: cluster 2026-04-16T19:22:30.761570+0000 mon.vm01 (mon.0) 581 : cluster [INF] osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120] boot
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: cluster 2026-04-16T19:22:30.761592+0000 mon.vm01 (mon.0) 582 : cluster [DBG] osdmap e19: 8 total, 5 up, 8 in
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.762688+0000 mon.vm01 (mon.0) 583 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.762895+0000 mon.vm01 (mon.0) 584 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.763134+0000 mon.vm01 (mon.0) 585 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.763302+0000 mon.vm01 (mon.0) 586 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.763445+0000 mon.vm04 (mon.1) 18 : audit [INF] from='osd.6 [v2:192.168.123.104:6824/3179997190,v1:192.168.123.104:6825/3179997190]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.764449+0000 mon.vm01 (mon.0) 587 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:30.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:30 vm01 bash[28222]: audit 2026-04-16T19:22:30.768796+0000 mon.vm01 (mon.0) 588 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:31.073 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: cluster 2026-04-16T19:22:29.263693+0000 mgr.vm01.nwhpas (mgr.14227) 85 : cluster [DBG] pgmap v31: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.762418+0000 mon.vm01 (mon.0) 567 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.763048+0000 mon.vm01 (mon.0) 568 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.763271+0000 mon.vm01 (mon.0) 569 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.763447+0000 mon.vm01 (mon.0) 570 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.763546+0000 mon.vm01 (mon.0) 571 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.763652+0000 mon.vm01 (mon.0) 572 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.764752+0000 mon.vm01 (mon.0) 573 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.948936+0000 mon.vm01 (mon.0) 574 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.954340+0000 mon.vm01 (mon.0) 575 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:29.969611+0000 mon.vm01 (mon.0) 576 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.264251+0000 mon.vm04 (mon.1) 17 : audit [INF] from='osd.6 [v2:192.168.123.104:6824/3179997190,v1:192.168.123.104:6825/3179997190]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.265622+0000 mon.vm01 (mon.0) 577 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-16T19:22:31.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.758655+0000 mon.vm01 (mon.0) 578 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.759048+0000 mon.vm01 (mon.0) 579 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.759110+0000 mon.vm01 (mon.0) 580 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]}]': finished
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: cluster 2026-04-16T19:22:30.761570+0000 mon.vm01 (mon.0) 581 : cluster [INF] osd.4 [v2:192.168.123.104:6816/2077047120,v1:192.168.123.104:6817/2077047120] boot
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: cluster 2026-04-16T19:22:30.761592+0000 mon.vm01 (mon.0) 582 : cluster [DBG] osdmap e19: 8 total, 5 up, 8 in
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.762688+0000 mon.vm01 (mon.0) 583 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.762895+0000 mon.vm01 (mon.0) 584 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.763134+0000 mon.vm01 (mon.0) 585 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.763302+0000 mon.vm01 (mon.0) 586 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.763445+0000 mon.vm04 (mon.1) 18 : audit [INF] from='osd.6 [v2:192.168.123.104:6824/3179997190,v1:192.168.123.104:6825/3179997190]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.764449+0000 mon.vm01 (mon.0) 587 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch
entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]} : dispatch 2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.768796+0000 mon.vm01 (mon.0) 588 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:30 vm04 bash[34817]: audit 2026-04-16T19:22:30.768796+0000 mon.vm01 (mon.0) 588 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:31.255 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":19,"num_osds":8,"num_up_osds":5,"osd_up_since":1776367350,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0} 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.453324+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.453324+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.453341+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.453341+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.748645+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.748645+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.748655+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:29.748655+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:30.906314+0000 mon.vm01 (mon.0) 589 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:30.906314+0000 mon.vm01 (mon.0) 589 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.072755+0000 mon.vm01 (mon.0) 590 : audit [DBG] from='client.? 
192.168.123.101:0/1231478186' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.072755+0000 mon.vm01 (mon.0) 590 : audit [DBG] from='client.? 192.168.123.101:0/1231478186' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:31.263925+0000 mgr.vm01.nwhpas (mgr.14227) 86 : cluster [DBG] pgmap v34: 1 pgs: 1 unknown; 0 B data, 132 MiB used, 100 GiB / 100 GiB avail 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:31.263925+0000 mgr.vm01.nwhpas (mgr.14227) 86 : cluster [DBG] pgmap v34: 1 pgs: 1 unknown; 0 B data, 132 MiB used, 100 GiB / 100 GiB avail 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.476201+0000 mon.vm01 (mon.0) 591 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.476201+0000 mon.vm01 (mon.0) 591 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.762263+0000 mon.vm01 (mon.0) 592 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.762263+0000 mon.vm01 (mon.0) 592 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.762361+0000 mon.vm01 (mon.0) 593 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]}]': finished 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.762361+0000 mon.vm01 (mon.0) 593 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]}]': finished 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:31.766638+0000 mon.vm01 (mon.0) 594 : cluster [INF] osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052] boot 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:31.766638+0000 mon.vm01 (mon.0) 594 : cluster [INF] osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052] boot 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:31.766669+0000 mon.vm01 (mon.0) 595 : cluster [DBG] osdmap 
e20: 8 total, 6 up, 8 in 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: cluster 2026-04-16T19:22:31.766669+0000 mon.vm01 (mon.0) 595 : cluster [DBG] osdmap e20: 8 total, 6 up, 8 in 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.766944+0000 mon.vm01 (mon.0) 596 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.766944+0000 mon.vm01 (mon.0) 596 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767051+0000 mon.vm01 (mon.0) 597 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767051+0000 mon.vm01 (mon.0) 597 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767126+0000 mon.vm01 (mon.0) 598 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767126+0000 mon.vm01 (mon.0) 598 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767277+0000 mon.vm01 (mon.0) 599 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767277+0000 mon.vm01 (mon.0) 599 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767686+0000 mon.vm01 (mon.0) 600 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.767686+0000 mon.vm01 (mon.0) 600 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.769325+0000 mon.vm01 (mon.0) 601 : audit [DBG] 
from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:31 vm04 bash[34817]: audit 2026-04-16T19:22:31.769325+0000 mon.vm01 (mon.0) 601 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.453324+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.453324+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.453341+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.453341+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.748645+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.748645+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.748655+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:29.748655+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:30.906314+0000 mon.vm01 (mon.0) 589 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:30.906314+0000 mon.vm01 (mon.0) 589 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.072755+0000 mon.vm01 (mon.0) 590 : audit [DBG] from='client.? 192.168.123.101:0/1231478186' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.072755+0000 mon.vm01 (mon.0) 590 : audit [DBG] from='client.? 
192.168.123.101:0/1231478186' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:31.263925+0000 mgr.vm01.nwhpas (mgr.14227) 86 : cluster [DBG] pgmap v34: 1 pgs: 1 unknown; 0 B data, 132 MiB used, 100 GiB / 100 GiB avail 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:31.263925+0000 mgr.vm01.nwhpas (mgr.14227) 86 : cluster [DBG] pgmap v34: 1 pgs: 1 unknown; 0 B data, 132 MiB used, 100 GiB / 100 GiB avail 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.476201+0000 mon.vm01 (mon.0) 591 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.476201+0000 mon.vm01 (mon.0) 591 : audit [INF] from='osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052]' entity='osd.5' 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.762263+0000 mon.vm01 (mon.0) 592 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.762263+0000 mon.vm01 (mon.0) 592 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.762361+0000 mon.vm01 (mon.0) 593 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]}]': finished 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.762361+0000 mon.vm01 (mon.0) 593 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]}]': finished 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:31.766638+0000 mon.vm01 (mon.0) 594 : cluster [INF] osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052] boot 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:31.766638+0000 mon.vm01 (mon.0) 594 : cluster [INF] osd.5 [v2:192.168.123.101:6818/3887158052,v1:192.168.123.101:6819/3887158052] boot 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:31.766669+0000 mon.vm01 (mon.0) 595 : cluster [DBG] osdmap e20: 8 total, 6 up, 8 in 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: cluster 2026-04-16T19:22:31.766669+0000 mon.vm01 (mon.0) 595 : cluster [DBG] osdmap e20: 8 total, 6 up, 8 in 2026-04-16T19:22:32.209 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.766944+0000 mon.vm01 (mon.0) 596 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.766944+0000 mon.vm01 (mon.0) 596 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767051+0000 mon.vm01 (mon.0) 597 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767051+0000 mon.vm01 (mon.0) 597 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767126+0000 mon.vm01 (mon.0) 598 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767126+0000 mon.vm01 (mon.0) 598 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767277+0000 mon.vm01 (mon.0) 599 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767277+0000 mon.vm01 (mon.0) 599 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767686+0000 mon.vm01 (mon.0) 600 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.767686+0000 mon.vm01 (mon.0) 600 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.769325+0000 mon.vm01 (mon.0) 601 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-16T19:22:32.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:31 vm01 bash[28222]: audit 2026-04-16T19:22:31.769325+0000 
2026-04-16T19:22:32.256 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:32.537 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:32.946 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:33.040 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":21,"num_osds":8,"num_up_osds":7,"osd_up_since":1776367352,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":1}
2026-04-16T19:22:33.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: cluster 2026-04-16T19:22:31.234825+0000 osd.6 (osd.6) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: cluster 2026-04-16T19:22:31.234837+0000 osd.6 (osd.6) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: audit 2026-04-16T19:22:32.766178+0000 mon.vm01 (mon.0) 602 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: cluster 2026-04-16T19:22:32.769010+0000 mon.vm01 (mon.0) 603 : cluster [INF] osd.6 [v2:192.168.123.104:6824/3179997190,v1:192.168.123.104:6825/3179997190] boot
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: cluster 2026-04-16T19:22:32.769036+0000 mon.vm01 (mon.0) 604 : cluster [DBG] osdmap e21: 8 total, 7 up, 8 in
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: audit 2026-04-16T19:22:32.769483+0000 mon.vm01 (mon.0) 605 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: audit 2026-04-16T19:22:32.769589+0000 mon.vm01 (mon.0) 606 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: audit 2026-04-16T19:22:32.775340+0000 mon.vm01 (mon.0) 607 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: audit 2026-04-16T19:22:32.946278+0000 mon.vm01 (mon.0) 608 : audit [DBG] from='client.? 192.168.123.101:0/1954239277' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: cluster 2026-04-16T19:22:33.459407+0000 mon.vm01 (mon.0) 609 : cluster [INF] osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606] boot
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: cluster 2026-04-16T19:22:33.459474+0000 mon.vm01 (mon.0) 610 : cluster [DBG] osdmap e22: 8 total, 8 up, 8 in
2026-04-16T19:22:33.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:33 vm01 bash[28222]: audit 2026-04-16T19:22:33.459861+0000 mon.vm01 (mon.0) 611 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:34.042 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd stat -f json
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: cluster 2026-04-16T19:22:31.234825+0000 osd.6 (osd.6) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: cluster 2026-04-16T19:22:31.234837+0000 osd.6 (osd.6) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: audit 2026-04-16T19:22:32.766178+0000 mon.vm01 (mon.0) 602 : audit [INF] from='osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: cluster 2026-04-16T19:22:32.769010+0000 mon.vm01 (mon.0) 603 : cluster [INF] osd.6 [v2:192.168.123.104:6824/3179997190,v1:192.168.123.104:6825/3179997190] boot
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: cluster 2026-04-16T19:22:32.769036+0000 mon.vm01 (mon.0) 604 : cluster [DBG] osdmap e21: 8 total, 7 up, 8 in
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: audit 2026-04-16T19:22:32.769483+0000 mon.vm01 (mon.0) 605 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: audit 2026-04-16T19:22:32.769589+0000 mon.vm01 (mon.0) 606 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: audit 2026-04-16T19:22:32.775340+0000 mon.vm01 (mon.0) 607 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: audit 2026-04-16T19:22:32.946278+0000 mon.vm01 (mon.0) 608 : audit [DBG] from='client.? 192.168.123.101:0/1954239277' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: cluster 2026-04-16T19:22:33.459407+0000 mon.vm01 (mon.0) 609 : cluster [INF] osd.7 [v2:192.168.123.101:6826/2609567606,v1:192.168.123.101:6827/2609567606] boot
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: cluster 2026-04-16T19:22:33.459474+0000 mon.vm01 (mon.0) 610 : cluster [DBG] osdmap e22: 8 total, 8 up, 8 in
2026-04-16T19:22:34.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:33 vm04 bash[34817]: audit 2026-04-16T19:22:33.459861+0000 mon.vm01 (mon.0) 611 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-16T19:22:34.333 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:34.745 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:34.825 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":23,"num_osds":8,"num_up_osds":8,"osd_up_since":1776367353,"num_in_osds":8,"osd_in_since":1776367336,"num_remapped_pgs":0}
2026-04-16T19:22:34.826 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd dump --format=json
2026-04-16T19:22:35.155 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:35.170 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:34 vm04 bash[34817]: cluster 2026-04-16T19:22:31.926627+0000 osd.7 (osd.7) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-16T19:22:35.170 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:34 vm04 bash[34817]: cluster 2026-04-16T19:22:31.926639+0000 osd.7 (osd.7) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-16T19:22:35.170 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:34 vm04 bash[34817]: cluster 2026-04-16T19:22:33.264139+0000 mgr.vm01.nwhpas (mgr.14227) 87 : cluster [DBG] pgmap v37: 1 pgs: 1 creating+peering; 0 B data, 185 MiB used, 140 GiB / 140 GiB avail
2026-04-16T19:22:35.170 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:34 vm04 bash[34817]: cluster 2026-04-16T19:22:34.463297+0000 mon.vm01 (mon.0) 612 : cluster [DBG] osdmap e23: 8 total, 8 up, 8 in
2026-04-16T19:22:35.170 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:34 vm04 bash[34817]: audit 2026-04-16T19:22:34.744401+0000 mon.vm01 (mon.0) 613 : audit [DBG] from='client.? 192.168.123.101:0/3441736218' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:35.178 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:34 vm01 bash[28222]: cluster 2026-04-16T19:22:31.926627+0000 osd.7 (osd.7) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-16T19:22:35.178 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:34 vm01 bash[28222]: cluster 2026-04-16T19:22:31.926639+0000 osd.7 (osd.7) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-16T19:22:35.178 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:34 vm01 bash[28222]: cluster 2026-04-16T19:22:33.264139+0000 mgr.vm01.nwhpas (mgr.14227) 87 : cluster [DBG] pgmap v37: 1 pgs: 1 creating+peering; 0 B data, 185 MiB used, 140 GiB / 140 GiB avail
2026-04-16T19:22:35.178 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:34 vm01 bash[28222]: cluster 2026-04-16T19:22:34.463297+0000 mon.vm01 (mon.0) 612 : cluster [DBG] osdmap e23: 8 total, 8 up, 8 in
2026-04-16T19:22:35.178 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:34 vm01 bash[28222]: audit 2026-04-16T19:22:34.744401+0000 mon.vm01 (mon.0) 613 : audit [DBG] from='client.? 192.168.123.101:0/3441736218' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-16T19:22:35.732 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:35.732 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":23,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","created":"2026-04-16T19:20:06.880988+0000","modified":"2026-04-16T19:22:34.456719+0000","last_up_change":"2026-04-16T19:22:33.452884+0000","last_in_change":"2026-04-16T19:22:16.224487+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-16T19:22:29.558220+0000","flags":32769,"flags_names":"hashpspool,creating","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"f4401b97-80b9-400c-9e95-0cc516e41cfc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6801","nonce":2796237597}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6803","nonce":2796237597}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6807","nonce":2796237597}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6805","nonce":2796237597}]},"public_addr":"192.168.123.104:6801/2796237597","cluster_addr":"192.168.123.104:6803/2796237597","heartbeat_back_addr":"192.168.123.104:6807/2796237597","heartbeat_front_addr":"192.168.123.104:6805/2796237597","state":["exists","up"]},{"osd":1,"uuid":"41dc1c75-f170-45be-bb8a-72218138afbc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6803","nonce":4232106917}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6805","nonce":4232106917}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6809","nonce":4232106917}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6807","nonce":4232106917}]},"public_addr":"192.168.123.101:6803/4232106917","cluster_addr":"192.168.123.101:6805/4232106917","heartbeat_back_addr":"192.168.123.101:6809/4232106917","heartbeat_front_addr":"192.168.123.101:6807/4232106917","state":["exists","up"]},{"osd":2,"uuid":"31f3cbba-e205-40f7-b992-d9d70a84e201","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6809","nonce":1205758774}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6811","nonce":1205758774}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6815","nonce":1205758774}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6813","nonce":1205758774}]},"public_addr":"192.168.123.104:6809/1205758774","cluster_addr":"192.168.123.104:6811/1205758774","heartbeat_back_addr":"192.168.123.104:6815/1205758774","heartbeat_front_addr":"192.168.123.104:6813/1205758774","state":["exists","up"]},{"osd":3,"uuid":"f8707e07-f8bc-46db-ad2f-0aa8f0a897b7","up":1,"in":1,"weight":1,"primary_affinity
":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6811","nonce":1926756932}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6813","nonce":1926756932}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6817","nonce":1926756932}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6815","nonce":1926756932}]},"public_addr":"192.168.123.101:6811/1926756932","cluster_addr":"192.168.123.101:6813/1926756932","heartbeat_back_addr":"192.168.123.101:6817/1926756932","heartbeat_front_addr":"192.168.123.101:6815/1926756932","state":["exists","up"]},{"osd":4,"uuid":"f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6817","nonce":2077047120}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6819","nonce":2077047120}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6823","nonce":2077047120}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6821","nonce":2077047120}]},"public_addr":"192.168.123.104:6817/2077047120","cluster_addr":"192.168.123.104:6819/2077047120","heartbeat_back_addr":"192.168.123.104:6823/2077047120","heartbeat_front_addr":"192.168.123.104:6821/2077047120","state":["exists","up"]},{"osd":5,"uuid":"79479d78-c6c8-4447-be64-0e15c9cad5ce","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6819","nonce":3887158052}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6821","nonce":3887158052}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6825","nonce":3887158052}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6823","nonce":3887158052}]},"public_addr":"192.168.123.101:6819/3887158052","cluster_addr":"192.168.123.101:6821/3887158052","heartbeat_back_addr":"192.168.123.101:6825/3887158052","heartbeat_front_addr":"192.168.123.101:6823/3887158052","state":["exists","up"]},{"osd":6,"uuid":"a60a41cd-e249-46a0-a399-39726704af0a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6825","nonce":3179997190}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":3179997190},{"type":"v1","addr":"192.
168.123.104:6827","nonce":3179997190}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6831","nonce":3179997190}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6829","nonce":3179997190}]},"public_addr":"192.168.123.104:6825/3179997190","cluster_addr":"192.168.123.104:6827/3179997190","heartbeat_back_addr":"192.168.123.104:6831/3179997190","heartbeat_front_addr":"192.168.123.104:6829/3179997190","state":["exists","up"]},{"osd":7,"uuid":"eb9ea244-fcd7-413a-98de-06ed25a24354","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6827","nonce":2609567606}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6829","nonce":2609567606}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6833","nonce":2609567606}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6831","nonce":2609567606}]},"public_addr":"192.168.123.101:6827/2609567606","cluster_addr":"192.168.123.101:6829/2609567606","heartbeat_back_addr":"192.168.123.101:6833/2609567606","heartbeat_front_addr":"192.168.123.101:6831/2609567606","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:25.745934+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:25.966480+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:27.308776+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:27.797886+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:29.453343+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:29.748657+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:31.234839+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:31.926642+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/3322368483":"2026-04-17T19:21:37.248274+0000","192.168.123.101:6801/4173875572":"2026-04-17T19:21:37.248274+0000","192.168.123.101:0/3503652498":"2026-04-17T19:20:31.772743+0000","192.16
8.123.101:0/3743328471":"2026-04-17T19:20:31.772743+0000","192.168.123.101:6800/4261365906":"2026-04-17T19:20:54.810746+0000","192.168.123.101:6801/1125309602":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/954154715":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/671470231":"2026-04-17T19:20:54.810746+0000","192.168.123.101:6800/1125309602":"2026-04-17T19:20:31.772743+0000","192.168.123.101:6801/4261365906":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/3082463839":"2026-04-17T19:21:37.248274+0000","192.168.123.101:0/1914076311":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/2586291185":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/1908358679":"2026-04-17T19:21:37.248274+0000","192.168.123.101:6800/4173875572":"2026-04-17T19:21:37.248274+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-16T19:22:35.894 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-04-16T19:22:29.558220+0000', 'flags': 32769, 'flags_names': 'hashpspool,creating', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '19', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'nonprimary_shards': '{}', 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-04-16T19:22:35.895 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 
-- ceph osd pool get .mgr pg_num 2026-04-16T19:22:36.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:34.791414+0000 mon.vm01 (mon.0) 614 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:34.791414+0000 mon.vm01 (mon.0) 614 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:34.807143+0000 mon.vm01 (mon.0) 615 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:34.807143+0000 mon.vm01 (mon.0) 615 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.500470+0000 mon.vm01 (mon.0) 616 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.500470+0000 mon.vm01 (mon.0) 616 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.585759+0000 mon.vm01 (mon.0) 617 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.585759+0000 mon.vm01 (mon.0) 617 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.659063+0000 mon.vm01 (mon.0) 618 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.659063+0000 mon.vm01 (mon.0) 618 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.729759+0000 mon.vm04 (mon.1) 19 : audit [DBG] from='client.? 192.168.123.101:0/3814367706' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.729759+0000 mon.vm04 (mon.1) 19 : audit [DBG] from='client.? 
192.168.123.101:0/3814367706' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.737807+0000 mon.vm01 (mon.0) 619 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:35 vm01 bash[28222]: audit 2026-04-16T19:22:35.737807+0000 mon.vm01 (mon.0) 619 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:34.791414+0000 mon.vm01 (mon.0) 614 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:34.791414+0000 mon.vm01 (mon.0) 614 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:34.807143+0000 mon.vm01 (mon.0) 615 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:34.807143+0000 mon.vm01 (mon.0) 615 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.500470+0000 mon.vm01 (mon.0) 616 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.500470+0000 mon.vm01 (mon.0) 616 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.585759+0000 mon.vm01 (mon.0) 617 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.585759+0000 mon.vm01 (mon.0) 617 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.659063+0000 mon.vm01 (mon.0) 618 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.659063+0000 mon.vm01 (mon.0) 618 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.729759+0000 mon.vm04 (mon.1) 19 : audit [DBG] from='client.? 192.168.123.101:0/3814367706' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.729759+0000 mon.vm04 (mon.1) 19 : audit [DBG] from='client.? 
192.168.123.101:0/3814367706' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.737807+0000 mon.vm01 (mon.0) 619 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:35 vm04 bash[34817]: audit 2026-04-16T19:22:35.737807+0000 mon.vm01 (mon.0) 619 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:36.249 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:36.842 INFO:teuthology.orchestra.run.vm01.stdout:pg_num: 1 2026-04-16T19:22:36.945 INFO:tasks.cephadm:Setting up client nodes... 2026-04-16T19:22:36.945 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-04-16T19:22:37.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: cluster 2026-04-16T19:22:35.264764+0000 mgr.vm01.nwhpas (mgr.14227) 88 : cluster [DBG] pgmap v40: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: cluster 2026-04-16T19:22:35.264764+0000 mgr.vm01.nwhpas (mgr.14227) 88 : cluster [DBG] pgmap v40: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: cluster 2026-04-16T19:22:35.821963+0000 mon.vm01 (mon.0) 620 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: cluster 2026-04-16T19:22:35.821963+0000 mon.vm01 (mon.0) 620 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.827028+0000 mon.vm01 (mon.0) 621 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.827028+0000 mon.vm01 (mon.0) 621 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.833886+0000 mon.vm01 (mon.0) 622 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.833886+0000 mon.vm01 (mon.0) 622 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.840053+0000 mon.vm01 (mon.0) 623 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 
bash[34817]: audit 2026-04-16T19:22:35.840053+0000 mon.vm01 (mon.0) 623 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.928541+0000 mon.vm01 (mon.0) 624 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:35.928541+0000 mon.vm01 (mon.0) 624 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.009211+0000 mon.vm01 (mon.0) 625 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.009211+0000 mon.vm01 (mon.0) 625 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.125975+0000 mon.vm01 (mon.0) 626 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.125975+0000 mon.vm01 (mon.0) 626 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.128465+0000 mon.vm01 (mon.0) 627 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.128465+0000 mon.vm01 (mon.0) 627 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.156220+0000 mon.vm01 (mon.0) 628 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.156220+0000 mon.vm01 (mon.0) 628 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.156520+0000 mon.vm01 (mon.0) 629 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.156520+0000 mon.vm01 (mon.0) 629 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.156614+0000 mon.vm01 (mon.0) 630 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.207 
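
The pools array logged by tasks.cephadm.ceph_manager above is lifted straight from that osd dump, and the `ceph osd pool get .mgr pg_num` issued just above re-reads one field through the mon rather than trusting the cached map; its result shows up a little further down. The same extraction done by hand, reusing the hypothetical `ceph_shell` helper sketched earlier (jq assumed present):

    # Summarize each pool from the osdmap dump.
    ceph_shell ceph osd dump --format=json |
        jq -r '.pools[] | "\(.pool_name) pg_num=\(.pg_num) size=\(.size) min_size=\(.min_size)"'

    # Cross-check a single value through the mon, as the harness does here:
    ceph_shell ceph osd pool get .mgr pg_num    # this run returns "pg_num: 1"
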
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.156614+0000 mon.vm01 (mon.0) 630 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.158708+0000 mon.vm04 (mon.1) 20 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.158708+0000 mon.vm04 (mon.1) 20 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.159261+0000 mon.vm01 (mon.0) 631 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.159261+0000 mon.vm01 (mon.0) 631 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.159312+0000 mon.vm01 (mon.0) 632 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.159312+0000 mon.vm01 (mon.0) 632 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.177427+0000 mon.vm04 (mon.1) 21 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:36 vm04 bash[34817]: audit 2026-04-16T19:22:36.177427+0000 mon.vm04 (mon.1) 21 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: cluster 2026-04-16T19:22:35.264764+0000 mgr.vm01.nwhpas (mgr.14227) 88 : cluster [DBG] pgmap v40: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:37.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: cluster 2026-04-16T19:22:35.264764+0000 mgr.vm01.nwhpas (mgr.14227) 88 : cluster [DBG] pgmap v40: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:37.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: cluster 2026-04-16T19:22:35.821963+0000 mon.vm01 (mon.0) 620 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in 2026-04-16T19:22:37.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: cluster 2026-04-16T19:22:35.821963+0000 mon.vm01 (mon.0) 620 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in 2026-04-16T19:22:37.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 
2026-04-16T19:22:35.827028+0000 mon.vm01 (mon.0) 621 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.827028+0000 mon.vm01 (mon.0) 621 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.833886+0000 mon.vm01 (mon.0) 622 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.833886+0000 mon.vm01 (mon.0) 622 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.840053+0000 mon.vm01 (mon.0) 623 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.840053+0000 mon.vm01 (mon.0) 623 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.928541+0000 mon.vm01 (mon.0) 624 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:35.928541+0000 mon.vm01 (mon.0) 624 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.009211+0000 mon.vm01 (mon.0) 625 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.009211+0000 mon.vm01 (mon.0) 625 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.125975+0000 mon.vm01 (mon.0) 626 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.125975+0000 mon.vm01 (mon.0) 626 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.128465+0000 mon.vm01 (mon.0) 627 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.128465+0000 mon.vm01 (mon.0) 627 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.156220+0000 mon.vm01 (mon.0) 628 : audit [INF] from='admin socket' entity='admin socket' cmd=smart 
args=[json]: finished 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.156220+0000 mon.vm01 (mon.0) 628 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.156520+0000 mon.vm01 (mon.0) 629 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.156520+0000 mon.vm01 (mon.0) 629 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.156614+0000 mon.vm01 (mon.0) 630 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.156614+0000 mon.vm01 (mon.0) 630 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.158708+0000 mon.vm04 (mon.1) 20 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.158708+0000 mon.vm04 (mon.1) 20 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.159261+0000 mon.vm01 (mon.0) 631 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.159261+0000 mon.vm01 (mon.0) 631 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.159312+0000 mon.vm01 (mon.0) 632 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.159312+0000 mon.vm01 (mon.0) 632 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "mon metadata", "id": "vm04"} : dispatch 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.177427+0000 mon.vm04 (mon.1) 21 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:36 vm01 bash[28222]: audit 2026-04-16T19:22:36.177427+0000 
mon.vm04 (mon.1) 21 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-16T19:22:37.269 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:37.803 INFO:teuthology.orchestra.run.vm01.stdout:[client.0] 2026-04-16T19:22:37.803 INFO:teuthology.orchestra.run.vm01.stdout: key = AQD9NuFpkD18LxAAKba55mH0e59HTMG/25RCzg== 2026-04-16T19:22:37.886 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-04-16T19:22:37.886 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-04-16T19:22:37.886 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-04-16T19:22:37.903 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-04-16T19:22:38.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:36.842024+0000 mon.vm01 (mon.0) 633 : audit [DBG] from='client.? 192.168.123.101:0/3423210858' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:36.842024+0000 mon.vm01 (mon.0) 633 : audit [DBG] from='client.? 192.168.123.101:0/3423210858' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: cephadm 2026-04-16T19:22:37.150309+0000 mgr.vm01.nwhpas (mgr.14227) 89 : cephadm [INF] Detected new or changed devices on vm04 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: cephadm 2026-04-16T19:22:37.150309+0000 mgr.vm01.nwhpas (mgr.14227) 89 : cephadm [INF] Detected new or changed devices on vm04 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.159070+0000 mon.vm01 (mon.0) 634 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.159070+0000 mon.vm01 (mon.0) 634 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.168697+0000 mon.vm01 (mon.0) 635 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.168697+0000 mon.vm01 (mon.0) 635 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.169669+0000 mon.vm01 (mon.0) 636 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-16T19:22:38.209 
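
"Setting up client nodes..." provisions one CephX identity per client role: the key is minted on a mon host with `ceph auth get-or-create`, and the resulting keyring text is written to /etc/ceph on the client node and made world-readable. A condensed sketch of the same sequence for client.0, with the capability string taken verbatim from this run (piping into dd here stands in for teuthology streaming the keyring to the remote node's stdin, and `ceph_shell` is the hypothetical helper from earlier):

    # Mint (or fetch) the client key and install the keyring where Ceph clients look for it.
    ceph_shell ceph auth get-or-create client.0 \
        mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' |
        sudo dd of=/etc/ceph/ceph.client.0.keyring
    sudo chmod 0644 /etc/ceph/ceph.client.0.keyring

The same steps repeat for client.1, driven from vm04.
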
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.169669+0000 mon.vm01 (mon.0) 636 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: cluster 2026-04-16T19:22:37.265385+0000 mgr.vm01.nwhpas (mgr.14227) 90 : cluster [DBG] pgmap v42: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: cluster 2026-04-16T19:22:37.265385+0000 mgr.vm01.nwhpas (mgr.14227) 90 : cluster [DBG] pgmap v42: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.558861+0000 mon.vm01 (mon.0) 637 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.558861+0000 mon.vm01 (mon.0) 637 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.796508+0000 mon.vm01 (mon.0) 638 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.796508+0000 mon.vm01 (mon.0) 638 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.800265+0000 mon.vm01 (mon.0) 639 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-16T19:22:38.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:37 vm01 bash[28222]: audit 2026-04-16T19:22:37.800265+0000 mon.vm01 (mon.0) 639 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:36.842024+0000 mon.vm01 (mon.0) 633 : audit [DBG] from='client.? 192.168.123.101:0/3423210858' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:36.842024+0000 mon.vm01 (mon.0) 633 : audit [DBG] from='client.? 
192.168.123.101:0/3423210858' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: cephadm 2026-04-16T19:22:37.150309+0000 mgr.vm01.nwhpas (mgr.14227) 89 : cephadm [INF] Detected new or changed devices on vm04 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: cephadm 2026-04-16T19:22:37.150309+0000 mgr.vm01.nwhpas (mgr.14227) 89 : cephadm [INF] Detected new or changed devices on vm04 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.159070+0000 mon.vm01 (mon.0) 634 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.159070+0000 mon.vm01 (mon.0) 634 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.168697+0000 mon.vm01 (mon.0) 635 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.168697+0000 mon.vm01 (mon.0) 635 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.169669+0000 mon.vm01 (mon.0) 636 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.169669+0000 mon.vm01 (mon.0) 636 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: cluster 2026-04-16T19:22:37.265385+0000 mgr.vm01.nwhpas (mgr.14227) 90 : cluster [DBG] pgmap v42: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: cluster 2026-04-16T19:22:37.265385+0000 mgr.vm01.nwhpas (mgr.14227) 90 : cluster [DBG] pgmap v42: 1 pgs: 1 creating+peering; 0 B data, 212 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.558861+0000 mon.vm01 (mon.0) 637 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.558861+0000 mon.vm01 (mon.0) 637 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.796508+0000 mon.vm01 (mon.0) 638 : 
audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.796508+0000 mon.vm01 (mon.0) 638 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.800265+0000 mon.vm01 (mon.0) 639 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-16T19:22:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:37 vm04 bash[34817]: audit 2026-04-16T19:22:37.800265+0000 mon.vm01 (mon.0) 639 : audit [INF] from='client.? 192.168.123.101:0/4241311657' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-16T19:22:38.220 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm04/config 2026-04-16T19:22:38.685 INFO:teuthology.orchestra.run.vm04.stdout:[client.1] 2026-04-16T19:22:38.685 INFO:teuthology.orchestra.run.vm04.stdout: key = AQD+NuFpfpBhKBAAG0sg2GoyvWnUdmici0Rtkg== 2026-04-16T19:22:38.758 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-04-16T19:22:38.758 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-04-16T19:22:38.758 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-04-16T19:22:38.817 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph config log 1 --format=json 2026-04-16T19:22:39.097 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: cluster 2026-04-16T19:22:38.201400+0000 mon.vm01 (mon.0) 640 : cluster [DBG] mgrmap e19: vm01.nwhpas(active, since 60s), standbys: vm04.ztqrcx 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: cluster 2026-04-16T19:22:38.201400+0000 mon.vm01 (mon.0) 640 : cluster [DBG] mgrmap e19: vm01.nwhpas(active, since 60s), standbys: vm04.ztqrcx 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: cephadm 2026-04-16T19:22:38.315646+0000 mgr.vm01.nwhpas (mgr.14227) 91 : cephadm [INF] Detected new or changed devices on vm01 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: cephadm 2026-04-16T19:22:38.315646+0000 mgr.vm01.nwhpas (mgr.14227) 91 : cephadm [INF] Detected new or changed devices on vm01 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.324395+0000 mon.vm01 (mon.0) 
641 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.324395+0000 mon.vm01 (mon.0) 641 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.330375+0000 mon.vm01 (mon.0) 642 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.330375+0000 mon.vm01 (mon.0) 642 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.331520+0000 mon.vm01 (mon.0) 643 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.331520+0000 mon.vm01 (mon.0) 643 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.332276+0000 mon.vm01 (mon.0) 644 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.332276+0000 mon.vm01 (mon.0) 644 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.332750+0000 mon.vm01 (mon.0) 645 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.332750+0000 mon.vm01 (mon.0) 645 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.337455+0000 mon.vm01 (mon.0) 646 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.337455+0000 mon.vm01 (mon.0) 646 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.339084+0000 mon.vm01 (mon.0) 647 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:22:39.457 
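
With both client keyrings installed, the harness snapshots the mon configuration database: `ceph config log <n>` returns the n most recent config changes, and `--format=json` makes the result machine-parseable. Reusing the hypothetical `ceph_shell` helper:

    # Most recent change recorded in the mon config database, as JSON.
    ceph_shell ceph config log 1 --format=json

    # Human-readable variant showing the last few entries.
    ceph_shell ceph config log 3
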
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.339084+0000 mon.vm01 (mon.0) 647 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.676248+0000 mon.vm04 (mon.1) 22 : audit [INF] from='client.? 192.168.123.104:0/949839350' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.676248+0000 mon.vm04 (mon.1) 22 : audit [INF] from='client.? 192.168.123.104:0/949839350' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.677357+0000 mon.vm01 (mon.0) 648 : audit [INF] from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.677357+0000 mon.vm01 (mon.0) 648 : audit [INF] from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.681083+0000 mon.vm01 (mon.0) 649 : audit [INF] from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-16T19:22:39.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:39 vm04 bash[34817]: audit 2026-04-16T19:22:38.681083+0000 mon.vm01 (mon.0) 649 : audit [INF] from='client.? 
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: cluster 2026-04-16T19:22:38.201400+0000 mon.vm01 (mon.0) 640 : cluster [DBG] mgrmap e19: vm01.nwhpas(active, since 60s), standbys: vm04.ztqrcx
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: cephadm 2026-04-16T19:22:38.315646+0000 mgr.vm01.nwhpas (mgr.14227) 91 : cephadm [INF] Detected new or changed devices on vm01
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.324395+0000 mon.vm01 (mon.0) 641 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.330375+0000 mon.vm01 (mon.0) 642 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.331520+0000 mon.vm01 (mon.0) 643 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.332276+0000 mon.vm01 (mon.0) 644 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:22:39.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.332750+0000 mon.vm01 (mon.0) 645 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:22:39.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.337455+0000 mon.vm01 (mon.0) 646 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:39.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.339084+0000 mon.vm01 (mon.0) 647 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:22:39.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.676248+0000 mon.vm04 (mon.1) 22 : audit [INF] from='client.? 192.168.123.104:0/949839350' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-16T19:22:39.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.677357+0000 mon.vm01 (mon.0) 648 : audit [INF] from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch
2026-04-16T19:22:39.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:39 vm01 bash[28222]: audit 2026-04-16T19:22:38.681083+0000 mon.vm01 (mon.0) 649 : audit [INF] from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-04-16T19:22:39.521 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:39.601 INFO:teuthology.orchestra.run.vm01.stdout:[{"version":14,"timestamp":"2026-04-16T19:22:31.471823+0000","name":"","changes":[{"name":"osd.5/osd_mclock_max_capacity_iops_ssd","new_value":"21764.690540"}]}]
2026-04-16T19:22:39.601 INFO:tasks.ceph_manager:config epoch is 14
2026-04-16T19:22:39.601 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
2026-04-16T19:22:39.601 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available
2026-04-16T19:22:39.602 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mgr dump --format=json
2026-04-16T19:22:39.877 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:22:40.312 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:40.377 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":19,"flags":0,"active_gid":14227,"active_name":"vm01.nwhpas","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":3275389746},{"type":"v1","addr":"192.168.123.101:6801","nonce":3275389746}]},"active_addr":"192.168.123.101:6801/3275389746","active_change":"2026-04-16T19:21:37.248399+0000","active_mgr_features":4541880224203014143,"available":true,"standbys":[{"gid":14260,"name":"vm04.ztqrcx","mgr_features":4541880224203014143,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate 
multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less 
aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per 
attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root 
CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived 
container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in 
seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate 
as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman 
only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.101:8443/","prometheus":"http://192.168.123.101:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"tentacle":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":4145513355}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":2958750150}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":2387557749}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":1669754205}]}]} 2026-04-16T19:22:40.379 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-04-16T19:22:40.379 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-04-16T19:22:40.379 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd dump --format=json 2026-04-16T19:22:40.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:40 vm04 bash[34817]: cluster 2026-04-16T19:22:39.265823+0000 mgr.vm01.nwhpas (mgr.14227) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:40.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:40 vm04 bash[34817]: cluster 2026-04-16T19:22:39.265823+0000 mgr.vm01.nwhpas (mgr.14227) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:40.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:40 vm04 bash[34817]: audit 2026-04-16T19:22:39.520588+0000 mon.vm01 (mon.0) 650 : audit [DBG] from='client.? 192.168.123.101:0/2552164772' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-16T19:22:40.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:40 vm04 bash[34817]: audit 2026-04-16T19:22:39.520588+0000 mon.vm01 (mon.0) 650 : audit [DBG] from='client.? 
192.168.123.101:0/2552164772' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-16T19:22:40.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:40 vm01 bash[28222]: cluster 2026-04-16T19:22:39.265823+0000 mgr.vm01.nwhpas (mgr.14227) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:40.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:40 vm01 bash[28222]: cluster 2026-04-16T19:22:39.265823+0000 mgr.vm01.nwhpas (mgr.14227) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:40.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:40 vm01 bash[28222]: audit 2026-04-16T19:22:39.520588+0000 mon.vm01 (mon.0) 650 : audit [DBG] from='client.? 192.168.123.101:0/2552164772' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-16T19:22:40.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:40 vm01 bash[28222]: audit 2026-04-16T19:22:39.520588+0000 mon.vm01 (mon.0) 650 : audit [DBG] from='client.? 192.168.123.101:0/2552164772' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-16T19:22:40.636 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:41.025 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:41.025 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":24,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","created":"2026-04-16T19:20:06.880988+0000","modified":"2026-04-16T19:22:35.810000+0000","last_up_change":"2026-04-16T19:22:33.452884+0000","last_in_change":"2026-04-16T19:22:16.224487+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-16T19:22:29.558220+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_
set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"f4401b97-80b9-400c-9e95-0cc516e41cfc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6801","nonce":2796237597}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6803","nonce":2796237597}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6807","nonce":2796237597}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6805","nonce":2796237597}]},"public_addr":"192.168.123.104:6801/2796237597","cluster_addr":"192.168.123.104:6803/2796237597","heartbeat_back_addr":"192.168.123.104:6807/2796237597","heartbeat_front_addr":"192.168.123.104:6805/2796237597","state":["exists","up"]},{"osd":1,"uuid":"41dc1c75-f170-45be-bb8a-72218138afbc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6803","nonce":4232106917}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6805","nonce":4232106917}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6809","nonce":4232106917}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6807","nonce":4232106917}]},"public_addr":"192.168.123.101:6803/4232106917","cluster_addr":"192.168.123.101:6805/4232106917","heartbeat_back_addr":"192.168.123.101:6809/4232106917","heartbeat_front_addr":"192.168.123.101:6807/4232106917","state":["exists","up"]},{"osd":2,"uuid":"31f3cbba-e205-40f7-b992-d9d70a84e201","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6809","nonce":1205758774}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6811","nonce":1205758774}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6815","nonce":1205758774}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1205758774},{"type":"v1","addr":"192.1
68.123.104:6813","nonce":1205758774}]},"public_addr":"192.168.123.104:6809/1205758774","cluster_addr":"192.168.123.104:6811/1205758774","heartbeat_back_addr":"192.168.123.104:6815/1205758774","heartbeat_front_addr":"192.168.123.104:6813/1205758774","state":["exists","up"]},{"osd":3,"uuid":"f8707e07-f8bc-46db-ad2f-0aa8f0a897b7","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6811","nonce":1926756932}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6813","nonce":1926756932}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6817","nonce":1926756932}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6815","nonce":1926756932}]},"public_addr":"192.168.123.101:6811/1926756932","cluster_addr":"192.168.123.101:6813/1926756932","heartbeat_back_addr":"192.168.123.101:6817/1926756932","heartbeat_front_addr":"192.168.123.101:6815/1926756932","state":["exists","up"]},{"osd":4,"uuid":"f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6817","nonce":2077047120}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6819","nonce":2077047120}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6823","nonce":2077047120}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6821","nonce":2077047120}]},"public_addr":"192.168.123.104:6817/2077047120","cluster_addr":"192.168.123.104:6819/2077047120","heartbeat_back_addr":"192.168.123.104:6823/2077047120","heartbeat_front_addr":"192.168.123.104:6821/2077047120","state":["exists","up"]},{"osd":5,"uuid":"79479d78-c6c8-4447-be64-0e15c9cad5ce","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6819","nonce":3887158052}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6821","nonce":3887158052}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6825","nonce":3887158052}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6823","nonce":3887158052}]},"public_addr":"192.168.123.101:6819/3887158052","cluster_addr":"192.168.123.101:6821/3887158052","heartbeat_back_addr":"192.168.123.101:6825/3887158052","heartbeat_front_addr":"192.168.123.101:6823/3887158052","state":["exists","up"]},{"osd":6,"uuid":"a60a41cd-e249-46a0-a399-39726704af0a","up":1,"in":1,"weight":1,"primary_af
finity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6825","nonce":3179997190}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6827","nonce":3179997190}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6831","nonce":3179997190}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6829","nonce":3179997190}]},"public_addr":"192.168.123.104:6825/3179997190","cluster_addr":"192.168.123.104:6827/3179997190","heartbeat_back_addr":"192.168.123.104:6831/3179997190","heartbeat_front_addr":"192.168.123.104:6829/3179997190","state":["exists","up"]},{"osd":7,"uuid":"eb9ea244-fcd7-413a-98de-06ed25a24354","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6827","nonce":2609567606}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6829","nonce":2609567606}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6833","nonce":2609567606}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6831","nonce":2609567606}]},"public_addr":"192.168.123.101:6827/2609567606","cluster_addr":"192.168.123.101:6829/2609567606","heartbeat_back_addr":"192.168.123.101:6833/2609567606","heartbeat_front_addr":"192.168.123.101:6831/2609567606","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:25.745934+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:25.966480+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:27.308776+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:27.797886+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:29.453343+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:29.748657+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:31.234839+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_
snaps_scrub":"2026-04-16T19:22:31.926642+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/3322368483":"2026-04-17T19:21:37.248274+0000","192.168.123.101:6801/4173875572":"2026-04-17T19:21:37.248274+0000","192.168.123.101:0/3503652498":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/3743328471":"2026-04-17T19:20:31.772743+0000","192.168.123.101:6800/4261365906":"2026-04-17T19:20:54.810746+0000","192.168.123.101:6801/1125309602":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/954154715":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/671470231":"2026-04-17T19:20:54.810746+0000","192.168.123.101:6800/1125309602":"2026-04-17T19:20:31.772743+0000","192.168.123.101:6801/4261365906":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/3082463839":"2026-04-17T19:21:37.248274+0000","192.168.123.101:0/1914076311":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/2586291185":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/1908358679":"2026-04-17T19:21:37.248274+0000","192.168.123.101:6800/4173875572":"2026-04-17T19:21:37.248274+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-16T19:22:41.093 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-04-16T19:22:41.093 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd dump --format=json 2026-04-16T19:22:41.375 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:41.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:41 vm01 bash[28222]: audit 2026-04-16T19:22:40.308088+0000 mon.vm01 (mon.0) 651 : audit [DBG] from='client.? 192.168.123.101:0/943709434' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-16T19:22:41.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:41 vm01 bash[28222]: audit 2026-04-16T19:22:40.308088+0000 mon.vm01 (mon.0) 651 : audit [DBG] from='client.? 192.168.123.101:0/943709434' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-16T19:22:41.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:41 vm01 bash[28222]: audit 2026-04-16T19:22:41.024034+0000 mon.vm01 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.101:0/2415262149' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:41.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:41 vm01 bash[28222]: audit 2026-04-16T19:22:41.024034+0000 mon.vm01 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.101:0/2415262149' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:41.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:41 vm04 bash[34817]: audit 2026-04-16T19:22:40.308088+0000 mon.vm01 (mon.0) 651 : audit [DBG] from='client.? 
192.168.123.101:0/943709434' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-16T19:22:41.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:41 vm04 bash[34817]: audit 2026-04-16T19:22:40.308088+0000 mon.vm01 (mon.0) 651 : audit [DBG] from='client.? 192.168.123.101:0/943709434' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-16T19:22:41.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:41 vm04 bash[34817]: audit 2026-04-16T19:22:41.024034+0000 mon.vm01 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.101:0/2415262149' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:41.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:41 vm04 bash[34817]: audit 2026-04-16T19:22:41.024034+0000 mon.vm01 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.101:0/2415262149' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:41.754 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:41.754 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":24,"fsid":"3711bb6a-39c9-11f1-9688-8928648d55a6","created":"2026-04-16T19:20:06.880988+0000","modified":"2026-04-16T19:22:35.810000+0000","last_up_change":"2026-04-16T19:22:33.452884+0000","last_in_change":"2026-04-16T19:22:16.224487+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-16T19:22:29.558220+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"f4401b97-80b9-400c-9e95-0cc516e41cfc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6801","nonce":2796237597}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6803","nonce":2796237597}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6807","nonce":2796237597}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":2796237597},{"type":"v1","addr":"192.168.123.104:6805","nonce":2796237597}]},"public_addr":"192.168.123.104:6801/2796237597","cluster_addr":"192.168.123.104:6803/2796237597","heartbeat_back_addr":"192.168.123.104:6807/2796237597","heartbeat_front_addr":"192.168.123.104:6805/2796237597","state":["exists","up"]},{"osd":1,"uuid":"41dc1c75-f170-45be-bb8a-72218138afbc","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6803","nonce":4232106917}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6805","nonce":4232106917}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6809","nonce":4232106917}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":4232106917},{"type":"v1","addr":"192.168.123.101:6807","nonce":4232106917}]},"public_addr":"192.168.123.101:6803/4232106917","cluster_addr":"192.168.123.101:6805/4232106917","heartbeat_back_addr":"192.168.123.101:6809/4232106917","heartbeat_front_addr":"192.168.123.101:6807/4232106917","state":["exists","up"]},{"osd":2,"uuid":"31f3cbba-e205-40f7-b992-d9d70a84e201","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6809","nonce":1205758774}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6811","nonce":1205758774}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6815","nonce":1205758774}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1205758774},{"type":"v1","addr":"192.168.123.104:6813","nonce":1205758774}]},"public_addr":"192.168.123.104:6809/1205758774","cluster_addr":"192.168.123.104:6811/1205758774","heartbeat_back_addr":"192.168.123.104:6815/1205758774","heartbeat_front_addr":"192.168.123.104:6813/1205758774","state":["exists","up"]},{"osd":3,"uuid":"f8707e07-f8bc-46db-ad2f-0aa8f0a897b7","up":1,"in":1,"weight":1,"primary_affinity
":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6811","nonce":1926756932}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6813","nonce":1926756932}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6817","nonce":1926756932}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":1926756932},{"type":"v1","addr":"192.168.123.101:6815","nonce":1926756932}]},"public_addr":"192.168.123.101:6811/1926756932","cluster_addr":"192.168.123.101:6813/1926756932","heartbeat_back_addr":"192.168.123.101:6817/1926756932","heartbeat_front_addr":"192.168.123.101:6815/1926756932","state":["exists","up"]},{"osd":4,"uuid":"f6fc03f6-f8f4-4a11-9138-dc78fb6fe8d0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6817","nonce":2077047120}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6819","nonce":2077047120}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6823","nonce":2077047120}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":2077047120},{"type":"v1","addr":"192.168.123.104:6821","nonce":2077047120}]},"public_addr":"192.168.123.104:6817/2077047120","cluster_addr":"192.168.123.104:6819/2077047120","heartbeat_back_addr":"192.168.123.104:6823/2077047120","heartbeat_front_addr":"192.168.123.104:6821/2077047120","state":["exists","up"]},{"osd":5,"uuid":"79479d78-c6c8-4447-be64-0e15c9cad5ce","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6819","nonce":3887158052}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6821","nonce":3887158052}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6825","nonce":3887158052}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":3887158052},{"type":"v1","addr":"192.168.123.101:6823","nonce":3887158052}]},"public_addr":"192.168.123.101:6819/3887158052","cluster_addr":"192.168.123.101:6821/3887158052","heartbeat_back_addr":"192.168.123.101:6825/3887158052","heartbeat_front_addr":"192.168.123.101:6823/3887158052","state":["exists","up"]},{"osd":6,"uuid":"a60a41cd-e249-46a0-a399-39726704af0a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6825","nonce":3179997190}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":3179997190},{"type":"v1","addr":"192.
168.123.104:6827","nonce":3179997190}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6831","nonce":3179997190}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":3179997190},{"type":"v1","addr":"192.168.123.104:6829","nonce":3179997190}]},"public_addr":"192.168.123.104:6825/3179997190","cluster_addr":"192.168.123.104:6827/3179997190","heartbeat_back_addr":"192.168.123.104:6831/3179997190","heartbeat_front_addr":"192.168.123.104:6829/3179997190","state":["exists","up"]},{"osd":7,"uuid":"eb9ea244-fcd7-413a-98de-06ed25a24354","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6827","nonce":2609567606}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6829","nonce":2609567606}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6832","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6833","nonce":2609567606}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":2609567606},{"type":"v1","addr":"192.168.123.101:6831","nonce":2609567606}]},"public_addr":"192.168.123.101:6827/2609567606","cluster_addr":"192.168.123.101:6829/2609567606","heartbeat_back_addr":"192.168.123.101:6833/2609567606","heartbeat_front_addr":"192.168.123.101:6831/2609567606","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:25.745934+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:25.966480+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:27.308776+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:27.797886+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:29.453343+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:29.748657+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:31.234839+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-16T19:22:31.926642+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/3322368483":"2026-04-17T19:21:37.248274+0000","192.168.123.101:6801/4173875572":"2026-04-17T19:21:37.248274+0000","192.168.123.101:0/3503652498":"2026-04-17T19:20:31.772743+0000","192.16
8.123.101:0/3743328471":"2026-04-17T19:20:31.772743+0000","192.168.123.101:6800/4261365906":"2026-04-17T19:20:54.810746+0000","192.168.123.101:6801/1125309602":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/954154715":"2026-04-17T19:20:31.772743+0000","192.168.123.101:0/671470231":"2026-04-17T19:20:54.810746+0000","192.168.123.101:6800/1125309602":"2026-04-17T19:20:31.772743+0000","192.168.123.101:6801/4261365906":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/3082463839":"2026-04-17T19:21:37.248274+0000","192.168.123.101:0/1914076311":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/2586291185":"2026-04-17T19:20:54.810746+0000","192.168.123.101:0/1908358679":"2026-04-17T19:21:37.248274+0000","192.168.123.101:6800/4173875572":"2026-04-17T19:21:37.248274+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.0 flush_pg_stats 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.1 flush_pg_stats 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.2 flush_pg_stats 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.3 flush_pg_stats 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.4 flush_pg_stats 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.5 flush_pg_stats 2026-04-16T19:22:41.831 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.6 flush_pg_stats 2026-04-16T19:22:41.832 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph tell osd.7 flush_pg_stats 2026-04-16T19:22:42.406 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.415 
INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.417 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.418 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.429 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.464 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.483 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:42 vm01 bash[28222]: cluster 2026-04-16T19:22:41.266071+0000 mgr.vm01.nwhpas (mgr.14227) 93 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:42.483 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:42 vm01 bash[28222]: cluster 2026-04-16T19:22:41.266071+0000 mgr.vm01.nwhpas (mgr.14227) 93 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:42.483 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:42 vm01 bash[28222]: audit 2026-04-16T19:22:41.753455+0000 mon.vm01 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.101:0/1295734977' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:42.483 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:42 vm01 bash[28222]: audit 2026-04-16T19:22:41.753455+0000 mon.vm01 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.101:0/1295734977' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:42.515 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.524 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:42.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:42 vm04 bash[34817]: cluster 2026-04-16T19:22:41.266071+0000 mgr.vm01.nwhpas (mgr.14227) 93 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:42.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:42 vm04 bash[34817]: cluster 2026-04-16T19:22:41.266071+0000 mgr.vm01.nwhpas (mgr.14227) 93 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:42.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:42 vm04 bash[34817]: audit 2026-04-16T19:22:41.753455+0000 mon.vm01 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.101:0/1295734977' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:42.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:42 vm04 bash[34817]: audit 2026-04-16T19:22:41.753455+0000 mon.vm01 (mon.0) 653 : audit [DBG] from='client.? 
192.168.123.101:0/1295734977' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-16T19:22:43.068 INFO:teuthology.orchestra.run.vm01.stdout:77309411332 2026-04-16T19:22:43.069 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.3 2026-04-16T19:22:43.213 INFO:teuthology.orchestra.run.vm01.stdout:85899345924 2026-04-16T19:22:43.214 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.5 2026-04-16T19:22:43.287 INFO:teuthology.orchestra.run.vm01.stdout:90194313220 2026-04-16T19:22:43.287 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.6 2026-04-16T19:22:43.460 INFO:teuthology.orchestra.run.vm01.stdout:94489280515 2026-04-16T19:22:43.461 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.7 2026-04-16T19:22:43.473 INFO:teuthology.orchestra.run.vm01.stdout:68719476740 2026-04-16T19:22:43.474 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.0 2026-04-16T19:22:43.492 INFO:teuthology.orchestra.run.vm01.stdout:73014444036 2026-04-16T19:22:43.493 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.2 2026-04-16T19:22:43.493 INFO:teuthology.orchestra.run.vm01.stdout:68719476740 2026-04-16T19:22:43.493 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.1 2026-04-16T19:22:43.501 INFO:teuthology.orchestra.run.vm01.stdout:81604378628 2026-04-16T19:22:43.502 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.4 2026-04-16T19:22:43.516 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:43.588 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:43.818 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:43.818 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:44.011 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config 
/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:44.155 INFO:teuthology.orchestra.run.vm01.stdout:77309411332 2026-04-16T19:22:44.179 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:44.205 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:44.205 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:44.380 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411332 got 77309411332 for osd.3 2026-04-16T19:22:44.381 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:44 vm01 bash[28222]: cluster 2026-04-16T19:22:43.267540+0000 mgr.vm01.nwhpas (mgr.14227) 94 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:44.381 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:44 vm01 bash[28222]: cluster 2026-04-16T19:22:43.267540+0000 mgr.vm01.nwhpas (mgr.14227) 94 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:44.381 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:44 vm01 bash[28222]: audit 2026-04-16T19:22:44.150618+0000 mon.vm01 (mon.0) 654 : audit [DBG] from='client.? 192.168.123.101:0/3167001051' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-16T19:22:44.381 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:44 vm01 bash[28222]: audit 2026-04-16T19:22:44.150618+0000 mon.vm01 (mon.0) 654 : audit [DBG] from='client.? 192.168.123.101:0/3167001051' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-16T19:22:44.381 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:44.413 INFO:teuthology.orchestra.run.vm01.stdout:85899345924 2026-04-16T19:22:44.635 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345924 got 85899345924 for osd.5 2026-04-16T19:22:44.635 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:44.635 INFO:teuthology.orchestra.run.vm01.stdout:94489280515 2026-04-16T19:22:44.704 INFO:teuthology.orchestra.run.vm01.stdout:90194313220 2026-04-16T19:22:44.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:44 vm04 bash[34817]: cluster 2026-04-16T19:22:43.267540+0000 mgr.vm01.nwhpas (mgr.14227) 94 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:44.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:44 vm04 bash[34817]: cluster 2026-04-16T19:22:43.267540+0000 mgr.vm01.nwhpas (mgr.14227) 94 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:44.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:44 vm04 bash[34817]: audit 2026-04-16T19:22:44.150618+0000 mon.vm01 (mon.0) 654 : audit [DBG] from='client.? 192.168.123.101:0/3167001051' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-16T19:22:44.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:44 vm04 bash[34817]: audit 2026-04-16T19:22:44.150618+0000 mon.vm01 (mon.0) 654 : audit [DBG] from='client.? 
192.168.123.101:0/3167001051' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-16T19:22:44.775 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280515 got 94489280515 for osd.7 2026-04-16T19:22:44.776 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:44.876 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313220 got 90194313220 for osd.6 2026-04-16T19:22:44.877 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:44.903 INFO:teuthology.orchestra.run.vm01.stdout:81604378627 2026-04-16T19:22:44.957 INFO:teuthology.orchestra.run.vm01.stdout:68719476740 2026-04-16T19:22:44.968 INFO:teuthology.orchestra.run.vm01.stdout:68719476740 2026-04-16T19:22:45.011 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378627 for osd.4 2026-04-16T19:22:45.039 INFO:teuthology.orchestra.run.vm01.stdout:73014444035 2026-04-16T19:22:45.075 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476740 got 68719476740 for osd.0 2026-04-16T19:22:45.075 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:45.111 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476740 got 68719476740 for osd.1 2026-04-16T19:22:45.111 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:45.135 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444036 got 73014444035 for osd.2 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.410358+0000 mon.vm01 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.101:0/328645811' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.410358+0000 mon.vm01 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.101:0/328645811' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.635427+0000 mon.vm01 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.101:0/2758989644' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.635427+0000 mon.vm01 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.101:0/2758989644' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.701155+0000 mon.vm01 (mon.0) 657 : audit [DBG] from='client.? 192.168.123.101:0/303118180' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.701155+0000 mon.vm01 (mon.0) 657 : audit [DBG] from='client.? 192.168.123.101:0/303118180' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-16T19:22:45.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.902488+0000 mon.vm01 (mon.0) 658 : audit [DBG] from='client.? 
192.168.123.101:0/1609841547' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.902488+0000 mon.vm01 (mon.0) 658 : audit [DBG] from='client.? 192.168.123.101:0/1609841547' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.956102+0000 mon.vm04 (mon.1) 23 : audit [DBG] from='client.? 192.168.123.101:0/2642313520' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.956102+0000 mon.vm04 (mon.1) 23 : audit [DBG] from='client.? 192.168.123.101:0/2642313520' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.966566+0000 mon.vm04 (mon.1) 24 : audit [DBG] from='client.? 192.168.123.101:0/2215476912' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:44.966566+0000 mon.vm04 (mon.1) 24 : audit [DBG] from='client.? 192.168.123.101:0/2215476912' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:45.038273+0000 mon.vm04 (mon.1) 25 : audit [DBG] from='client.? 192.168.123.101:0/3819405794' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:45.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:45 vm04 bash[34817]: audit 2026-04-16T19:22:45.038273+0000 mon.vm04 (mon.1) 25 : audit [DBG] from='client.? 192.168.123.101:0/3819405794' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.410358+0000 mon.vm01 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.101:0/328645811' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.410358+0000 mon.vm01 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.101:0/328645811' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.635427+0000 mon.vm01 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.101:0/2758989644' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.635427+0000 mon.vm01 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.101:0/2758989644' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.701155+0000 mon.vm01 (mon.0) 657 : audit [DBG] from='client.? 
192.168.123.101:0/303118180' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.701155+0000 mon.vm01 (mon.0) 657 : audit [DBG] from='client.? 192.168.123.101:0/303118180' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.902488+0000 mon.vm01 (mon.0) 658 : audit [DBG] from='client.? 192.168.123.101:0/1609841547' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.902488+0000 mon.vm01 (mon.0) 658 : audit [DBG] from='client.? 192.168.123.101:0/1609841547' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.956102+0000 mon.vm04 (mon.1) 23 : audit [DBG] from='client.? 192.168.123.101:0/2642313520' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.956102+0000 mon.vm04 (mon.1) 23 : audit [DBG] from='client.? 192.168.123.101:0/2642313520' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.966566+0000 mon.vm04 (mon.1) 24 : audit [DBG] from='client.? 192.168.123.101:0/2215476912' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:44.966566+0000 mon.vm04 (mon.1) 24 : audit [DBG] from='client.? 192.168.123.101:0/2215476912' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:45.038273+0000 mon.vm04 (mon.1) 25 : audit [DBG] from='client.? 192.168.123.101:0/3819405794' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:45.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:45 vm01 bash[28222]: audit 2026-04-16T19:22:45.038273+0000 mon.vm04 (mon.1) 25 : audit [DBG] from='client.? 
192.168.123.101:0/3819405794' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:46.012 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.4 2026-04-16T19:22:46.136 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph osd last-stat-seq osd.2 2026-04-16T19:22:46.271 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:46.417 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:46.589 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:46 vm01 bash[28222]: cluster 2026-04-16T19:22:45.267835+0000 mgr.vm01.nwhpas (mgr.14227) 95 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:46.589 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:46 vm01 bash[28222]: cluster 2026-04-16T19:22:45.267835+0000 mgr.vm01.nwhpas (mgr.14227) 95 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:46.668 INFO:teuthology.orchestra.run.vm01.stdout:81604378628 2026-04-16T19:22:46.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:46 vm04 bash[34817]: cluster 2026-04-16T19:22:45.267835+0000 mgr.vm01.nwhpas (mgr.14227) 95 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:46.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:46 vm04 bash[34817]: cluster 2026-04-16T19:22:45.267835+0000 mgr.vm01.nwhpas (mgr.14227) 95 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:46.750 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378628 for osd.4 2026-04-16T19:22:46.750 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:46.804 INFO:teuthology.orchestra.run.vm01.stdout:73014444037 2026-04-16T19:22:46.901 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444036 got 73014444037 for osd.2 2026-04-16T19:22:46.901 DEBUG:teuthology.parallel:result is None 2026-04-16T19:22:46.901 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-04-16T19:22:46.901 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph pg dump --format=json 2026-04-16T19:22:47.171 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:47.538 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:47.539 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-04-16T19:22:47.551 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:47 vm01 bash[28222]: audit 2026-04-16T19:22:46.667622+0000 mon.vm01 (mon.0) 659 : audit [DBG] from='client.? 
192.168.123.101:0/2268844767' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:47.551 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:47 vm01 bash[28222]: audit 2026-04-16T19:22:46.667622+0000 mon.vm01 (mon.0) 659 : audit [DBG] from='client.? 192.168.123.101:0/2268844767' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:47.551 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:47 vm01 bash[28222]: audit 2026-04-16T19:22:46.803821+0000 mon.vm01 (mon.0) 660 : audit [DBG] from='client.? 192.168.123.101:0/837545799' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:47.551 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:47 vm01 bash[28222]: audit 2026-04-16T19:22:46.803821+0000 mon.vm01 (mon.0) 660 : audit [DBG] from='client.? 192.168.123.101:0/837545799' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:47.644 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":47,"stamp":"2026-04-16T19:22:47.267951+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167706624,"kb_used":219928,"kb_used_data":3564,"kb_used_omap":56,"kb_used_meta":215495,"kb_avail":167486696,"statfs":{"total":171731582976,"available":171506376704,"internally_reserved":0,"allocated":3649536,"data_stored":2459528,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":57795,"internal_metadata":220667453},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,
"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.002267"},"pg_stats":[{"pgid":"1.0","version":"24'192","reported_seq":242,"reported_epoch":24,"state":"active+clean","last_fresh":"2026-04-16T19:22:36.234528+0000","last_change":"2026-04-16T19:22:35.357913+0000","last_active":"2026-04-16T19:22:36.234528+0000","last_peered":"2026-04-16T19:22:36.234528+0000","last_clean":"2026-04-16T19:22:36.234528+0000","last_became_active":"2026-04-16T19:22:35.357700+0000","last_became_peered":"2026-04-16T19:22:35.357700+0000","last_unstale":"2026-04-16T19:22:36.234528+0000","last_undegraded":"2026-04-16T19:22:36.234528+0000","last_fullsized":"2026-04-16T19:22:36.234528+0000","mapping_epoch":22,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":23,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-16T19:22:29.750015+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-16T19:22:29.750015+0000","last_clean_scrub_stamp":"2026-04-16T19:22:29.750015+0000","objects_scrubbed":0,"log_size":192,"log_dups_size":0,"ondisk_log_size":192,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-04-18T03:33:33.391986+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,2],"acting":[6,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1781760,"data_stored":1771104,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":22,"seq":94489280516,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":5,"kb_used_meta":26938,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":5510,"internal_metadata":27585146},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":21,"seq":90194313220,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27956,"kb_used_data":808,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20935372,"statfs":{"total":21466447872,"available":21437820928,"internally_reserved":0,"allocated":827392,"data_stored":676421,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_
allocated":6162,"internal_metadata":27584494},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":20,"seq":85899345924,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":28016,"kb_used_data":808,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20935312,"statfs":{"total":21466447872,"available":21437759488,"internally_reserved":0,"allocated":827392,"data_stored":676421,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6821,"internal_metadata":27583835},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":19,"seq":81604378629,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":4,"kb_used_meta":26939,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":4871,"internal_metadata":27585785},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8119,"internal_metadata":27582537},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":17,"seq":73014444037,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":28016,"kb_used_data":808,"kb_used_omap":8,"kb_used_meta":26935,"kb_avail":20935312,"statfs":{"total":21466447872,"available":21437759488,"internally_reserved":0,"allocated":827392,"data_stored":676421,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8770,"internal_metadata":27581886},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":9,"kb_used_meta":26934,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":23347
2,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":9421,"internal_metadata":27581235},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8121,"internal_metadata":27582535},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-16T19:22:47.644 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph pg dump --format=json 2026-04-16T19:22:47.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:47 vm04 bash[34817]: audit 2026-04-16T19:22:46.667622+0000 mon.vm01 (mon.0) 659 : audit [DBG] from='client.? 192.168.123.101:0/2268844767' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:47.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:47 vm04 bash[34817]: audit 2026-04-16T19:22:46.667622+0000 mon.vm01 (mon.0) 659 : audit [DBG] from='client.? 192.168.123.101:0/2268844767' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-16T19:22:47.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:47 vm04 bash[34817]: audit 2026-04-16T19:22:46.803821+0000 mon.vm01 (mon.0) 660 : audit [DBG] from='client.? 192.168.123.101:0/837545799' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:47.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:47 vm04 bash[34817]: audit 2026-04-16T19:22:46.803821+0000 mon.vm01 (mon.0) 660 : audit [DBG] from='client.? 
192.168.123.101:0/837545799' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-16T19:22:47.903 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:48.287 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-04-16T19:22:48.287 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:48.361 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":47,"stamp":"2026-04-16T19:22:47.267951+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167706624,"kb_used":219928,"kb_used_data":3564,"kb_used_omap":56,"kb_used_meta":215495,"kb_avail":167486696,"statfs":{"total":171731582976,"available":171506376704,"internally_reserved":0,"allocated":3649536,"data_stored":2459528,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":57795,"internal_metadata":220667453},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_com
pressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.002267"},"pg_stats":[{"pgid":"1.0","version":"24'192","reported_seq":242,"reported_epoch":24,"state":"active+clean","last_fresh":"2026-04-16T19:22:36.234528+0000","last_change":"2026-04-16T19:22:35.357913+0000","last_active":"2026-04-16T19:22:36.234528+0000","last_peered":"2026-04-16T19:22:36.234528+0000","last_clean":"2026-04-16T19:22:36.234528+0000","last_became_active":"2026-04-16T19:22:35.357700+0000","last_became_peered":"2026-04-16T19:22:35.357700+0000","last_unstale":"2026-04-16T19:22:36.234528+0000","last_undegraded":"2026-04-16T19:22:36.234528+0000","last_fullsized":"2026-04-16T19:22:36.234528+0000","mapping_epoch":22,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":23,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-16T19:22:29.750015+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-16T19:22:29.750015+0000","last_clean_scrub_stamp":"2026-04-16T19:22:29.750015+0000","objects_scrubbed":0,"log_size":192,"log_dups_size":0,"ondisk_log_size":192,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-04-18T03:33:33.391986+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,2],"acting":[6,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_b
ytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1781760,"data_stored":1771104,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":22,"seq":94489280516,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":5,"kb_used_meta":26938,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":5510,"internal_metadata":27585146},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":21,"seq":90194313220,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27956,"kb_used_data":808,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20935372,"statfs":{"total":21466447872,"available":21437820928,"internally_reserved":0,"allocated":827392,"data_stored":676421,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6162,"internal_metadata":27584494},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":20,"seq":85899345924,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":28016,"kb_used_data":808,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20935312,"statfs":{"total":21466447872,"available":21437759488,"internally_reserved":0,"allocated":827392,"data_stored":676421,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6821,"internal_metadata":27583835},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":19,"seq":81604378629,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":4,"kb_used_meta":26939,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":4871,"internal_metadata":27585785},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411333,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936140,"statfs":{"total":21466447872,"avail
able":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8119,"internal_metadata":27582537},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":17,"seq":73014444037,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":28016,"kb_used_data":808,"kb_used_omap":8,"kb_used_meta":26935,"kb_avail":20935312,"statfs":{"total":21466447872,"available":21437759488,"internally_reserved":0,"allocated":827392,"data_stored":676421,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8770,"internal_metadata":27581886},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":9,"kb_used_meta":26934,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":9421,"internal_metadata":27581235},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27188,"kb_used_data":228,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936140,"statfs":{"total":21466447872,"available":21438607360,"internally_reserved":0,"allocated":233472,"data_stored":86053,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8121,"internal_metadata":27582535},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-16T19:22:48.362 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-04-16T19:22:48.362 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-04-16T19:22:48.362 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-04-16T19:22:48.362 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph health --format=json 2026-04-16T19:22:48.621 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:48.663 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:48 vm01 bash[28222]: cluster 2026-04-16T19:22:47.268053+0000 mgr.vm01.nwhpas (mgr.14227) 96 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:48.663 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:48 vm01 bash[28222]: cluster 2026-04-16T19:22:47.268053+0000 mgr.vm01.nwhpas (mgr.14227) 96 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:48.663 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:48 vm01 bash[28222]: audit 2026-04-16T19:22:47.537900+0000 mgr.vm01.nwhpas (mgr.14227) 97 : audit [DBG] from='client.14520 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:48.663 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:48 vm01 bash[28222]: audit 2026-04-16T19:22:47.537900+0000 mgr.vm01.nwhpas (mgr.14227) 97 : audit [DBG] from='client.14520 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:48.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:48 vm04 bash[34817]: cluster 2026-04-16T19:22:47.268053+0000 mgr.vm01.nwhpas (mgr.14227) 96 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:48.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:48 vm04 bash[34817]: cluster 2026-04-16T19:22:47.268053+0000 mgr.vm01.nwhpas (mgr.14227) 96 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:48.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:48 vm04 bash[34817]: audit 2026-04-16T19:22:47.537900+0000 mgr.vm01.nwhpas (mgr.14227) 97 : audit [DBG] from='client.14520 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:48.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:48 vm04 bash[34817]: audit 2026-04-16T19:22:47.537900+0000 mgr.vm01.nwhpas (mgr.14227) 97 : audit [DBG] from='client.14520 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:49.017 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:49.017 INFO:teuthology.orchestra.run.vm01.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-04-16T19:22:49.113 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-04-16T19:22:49.113 INFO:tasks.cephadm:Setup complete, yielding 2026-04-16T19:22:49.113 INFO:teuthology.run_tasks:Running task cephadm.shell... 
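
[editor's note] wait_until_healthy is the simpler poll: `ceph health --format=json` until `status` is `HEALTH_OK`, which is exactly the `{"status":"HEALTH_OK","checks":{},"mutes":[]}` reply above. The cephadm.shell task that starts next runs each listed command through `bash -c` inside the same container, handing it the admin conf and keyring (the health poll itself used the plain `shell -- ceph health` form; this sketch reuses one helper for brevity). A sketch under the same assumptions as the previous snippet:

    import json
    import subprocess
    import time

    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7"
    FSID = "3711bb6a-39c9-11f1-9688-8928648d55a6"

    def cephadm_shell(command):
        # Mirrors the cephadm.shell invocation logged below: wrap the command
        # in `bash -c` and point the shell at the admin conf and keyring.
        argv = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
                "shell", "-c", "/etc/ceph/ceph.conf",
                "-k", "/etc/ceph/ceph.client.admin.keyring",
                "--fsid", FSID, "--", "bash", "-c", command]
        return subprocess.run(argv, check=True, capture_output=True,
                              text=True).stdout

    def wait_until_healthy(timeout=300):
        # Poll `ceph health --format=json` until the cluster is HEALTH_OK.
        deadline = time.time() + timeout
        while True:
            health = json.loads(cephadm_shell("ceph health --format=json"))
            if health["status"] == "HEALTH_OK":
                return
            if time.time() > deadline:
                raise TimeoutError(f"cluster still unhealthy: {health}")
            time.sleep(2)
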
2026-04-16T19:22:49.115 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-04-16T19:22:49.115 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch status' 2026-04-16T19:22:49.361 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:49 vm01 bash[28222]: audit 2026-04-16T19:22:48.286595+0000 mgr.vm01.nwhpas (mgr.14227) 98 : audit [DBG] from='client.14524 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:49.361 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:49 vm01 bash[28222]: audit 2026-04-16T19:22:48.286595+0000 mgr.vm01.nwhpas (mgr.14227) 98 : audit [DBG] from='client.14524 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:49.361 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:49 vm01 bash[28222]: audit 2026-04-16T19:22:49.016903+0000 mon.vm01 (mon.0) 661 : audit [DBG] from='client.? 192.168.123.101:0/2290089283' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-16T19:22:49.361 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:49 vm01 bash[28222]: audit 2026-04-16T19:22:49.016903+0000 mon.vm01 (mon.0) 661 : audit [DBG] from='client.? 192.168.123.101:0/2290089283' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-16T19:22:49.379 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:49.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:49 vm04 bash[34817]: audit 2026-04-16T19:22:48.286595+0000 mgr.vm01.nwhpas (mgr.14227) 98 : audit [DBG] from='client.14524 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:49.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:49 vm04 bash[34817]: audit 2026-04-16T19:22:48.286595+0000 mgr.vm01.nwhpas (mgr.14227) 98 : audit [DBG] from='client.14524 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:49.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:49 vm04 bash[34817]: audit 2026-04-16T19:22:49.016903+0000 mon.vm01 (mon.0) 661 : audit [DBG] from='client.? 192.168.123.101:0/2290089283' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-16T19:22:49.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:49 vm04 bash[34817]: audit 2026-04-16T19:22:49.016903+0000 mon.vm01 (mon.0) 661 : audit [DBG] from='client.? 
192.168.123.101:0/2290089283' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-16T19:22:49.758 INFO:teuthology.orchestra.run.vm01.stdout:Backend: cephadm 2026-04-16T19:22:49.759 INFO:teuthology.orchestra.run.vm01.stdout:Available: Yes 2026-04-16T19:22:49.759 INFO:teuthology.orchestra.run.vm01.stdout:Paused: No 2026-04-16T19:22:49.824 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch ps' 2026-04-16T19:22:50.077 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:50.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:50 vm01 bash[28222]: cluster 2026-04-16T19:22:49.268312+0000 mgr.vm01.nwhpas (mgr.14227) 99 : cluster [DBG] pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:50.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:50 vm01 bash[28222]: cluster 2026-04-16T19:22:49.268312+0000 mgr.vm01.nwhpas (mgr.14227) 99 : cluster [DBG] pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:50.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:50 vm01 bash[28222]: audit 2026-04-16T19:22:49.758090+0000 mgr.vm01.nwhpas (mgr.14227) 100 : audit [DBG] from='client.14532 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:50.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:50 vm01 bash[28222]: audit 2026-04-16T19:22:49.758090+0000 mgr.vm01.nwhpas (mgr.14227) 100 : audit [DBG] from='client.14532 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.vm01 vm01 *:9093,9094 running (52s) 14s ago 101s 14.9M - 0.28.1 27c475db5fb1 f60e3350c3f7 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm01 vm01 *:9926 running (106s) 14s ago 106s 8259k - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 540e4c218237 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm04 vm04 *:9926 running (64s) 15s ago 64s 8880k - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 761f5560b6de 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm01 vm01 running (105s) 14s ago 105s 10.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 ce9564d36680 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm04 vm04 running (63s) 15s ago 64s 10.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 7f027725d823 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:grafana.vm01 vm01 *:3000 running (51s) 14s ago 90s 127M - 12.2.0 74144189b384 6b9d88a1a1b5 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm01.nwhpas vm01 *:9283,8765,8443 running (2m) 14s ago 2m 525M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 90f8f68ae65c 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm04.ztqrcx vm04 *:8443,9283,8765 running (62s) 15s ago 62s 469M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 ae4f1b15769f 2026-04-16T19:22:50.459 
INFO:teuthology.orchestra.run.vm01.stdout:mon.vm01 vm01 running (2m) 14s ago 2m 49.7M 2048M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 7daade271a02 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm04 vm04 running (60s) 15s ago 60s 41.5M 2048M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 42f903048de4 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm01 vm01 *:9100 running (102s) 14s ago 104s 8387k - 1.9.1 d00a542e409e 87b5ac70aa73 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm04 vm04 *:9100 running (61s) 15s ago 63s 7839k - 1.9.1 d00a542e409e 1cb8d0b1bf17 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm04 running (26s) 15s ago 29s 29.5M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 73aed07d4ffd 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (26s) 14s ago 29s 30.4M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b78a9d7ead0 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm04 running (25s) 15s ago 27s 29.6M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 d826511db51b 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (24s) 14s ago 27s 29.7M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 4638771ab976 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm04 running (23s) 15s ago 26s 28.8M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 159489c731a2 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm01 running (22s) 14s ago 25s 41.0M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 5d3c8841868a 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm04 running (21s) 15s ago 24s 26.5M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 cca037a32e13 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm01 running (20s) 14s ago 23s 23.9M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 9aae000c145b 2026-04-16T19:22:50.459 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.vm01 vm01 *:9095 running (50s) 14s ago 84s 28.7M - 3.6.0 76947e7ef22f 52e3cadf80b0 2026-04-16T19:22:50.532 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch ls' 2026-04-16T19:22:50.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:50 vm04 bash[34817]: cluster 2026-04-16T19:22:49.268312+0000 mgr.vm01.nwhpas (mgr.14227) 99 : cluster [DBG] pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:50.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:50 vm04 bash[34817]: cluster 2026-04-16T19:22:49.268312+0000 mgr.vm01.nwhpas (mgr.14227) 99 : cluster [DBG] pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:50.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:50 vm04 bash[34817]: audit 2026-04-16T19:22:49.758090+0000 mgr.vm01.nwhpas (mgr.14227) 100 : audit [DBG] from='client.14532 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:50.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:50 vm04 bash[34817]: audit 2026-04-16T19:22:49.758090+0000 mgr.vm01.nwhpas (mgr.14227) 100 : audit [DBG] from='client.14532 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", 
""]}]: dispatch 2026-04-16T19:22:50.799 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager ?:9093,9094 1/1 15s ago 2m count:1 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter ?:9926 2/2 16s ago 2m * 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:crash 2/2 16s ago 2m * 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:grafana ?:3000 1/1 15s ago 2m count:1 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:mgr 2/2 16s ago 2m count:2 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:mon 2/2 16s ago 103s vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04;count:2 2026-04-16T19:22:51.185 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter ?:9100 2/2 16s ago 2m * 2026-04-16T19:22:51.186 INFO:teuthology.orchestra.run.vm01.stdout:osd.all-available-devices 8 16s ago 52s * 2026-04-16T19:22:51.186 INFO:teuthology.orchestra.run.vm01.stdout:prometheus ?:9095 1/1 15s ago 2m count:1 2026-04-16T19:22:51.252 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch host ls' 2026-04-16T19:22:51.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:51 vm01 bash[28222]: audit 2026-04-16T19:22:50.453942+0000 mgr.vm01.nwhpas (mgr.14227) 101 : audit [DBG] from='client.14536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:51.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:51 vm01 bash[28222]: audit 2026-04-16T19:22:50.453942+0000 mgr.vm01.nwhpas (mgr.14227) 101 : audit [DBG] from='client.14536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:51.535 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:51.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:51 vm04 bash[34817]: audit 2026-04-16T19:22:50.453942+0000 mgr.vm01.nwhpas (mgr.14227) 101 : audit [DBG] from='client.14536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:51.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:51 vm04 bash[34817]: audit 2026-04-16T19:22:50.453942+0000 mgr.vm01.nwhpas (mgr.14227) 101 : audit [DBG] from='client.14536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:51.915 INFO:teuthology.orchestra.run.vm01.stdout:HOST ADDR LABELS STATUS 2026-04-16T19:22:51.915 INFO:teuthology.orchestra.run.vm01.stdout:vm01 192.168.123.101 2026-04-16T19:22:51.915 INFO:teuthology.orchestra.run.vm01.stdout:vm04 192.168.123.104 2026-04-16T19:22:51.915 INFO:teuthology.orchestra.run.vm01.stdout:2 hosts in cluster 2026-04-16T19:22:51.977 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 
'ceph orch device ls' 2026-04-16T19:22:52.233 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:52.607 INFO:teuthology.orchestra.run.vm01.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme0n1 ssd Linux_f0ab57676a82334da227 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme1n1 ssd Linux_498a3ba4a5f7d3afa78b 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme2n1 ssd Linux_018d58381178c5541924 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme3n1 ssd Linux_cb1f69ea567d81122781 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 14s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme0n1 ssd Linux_29b2bfa9f2fa89bb2491 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme1n1 ssd Linux_61c157a7b2d028903b27 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme2n1 ssd Linux_c8bfb12ab4da82fe6601 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme3n1 ssd Linux_5c94f58d64ed1622992e 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 15s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdb hdd DWNBRSTVMM04001 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdc hdd DWNBRSTVMM04002 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdd hdd DWNBRSTVMM04003 20.0G No 15s ago Has a FileSystem, Insufficient space 
(<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.608 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vde hdd DWNBRSTVMM04004 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:52 vm01 bash[28222]: audit 2026-04-16T19:22:51.182045+0000 mgr.vm01.nwhpas (mgr.14227) 102 : audit [DBG] from='client.24309 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:52 vm01 bash[28222]: audit 2026-04-16T19:22:51.182045+0000 mgr.vm01.nwhpas (mgr.14227) 102 : audit [DBG] from='client.24309 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:52 vm01 bash[28222]: cluster 2026-04-16T19:22:51.268539+0000 mgr.vm01.nwhpas (mgr.14227) 103 : cluster [DBG] pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:52.621 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:52 vm01 bash[28222]: cluster 2026-04-16T19:22:51.268539+0000 mgr.vm01.nwhpas (mgr.14227) 103 : cluster [DBG] pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:52.685 INFO:teuthology.run_tasks:Running task vip... 2026-04-16T19:22:52.688 INFO:tasks.vip:Allocating static IPs for each host... 2026-04-16T19:22:52.688 INFO:tasks.vip:peername 192.168.123.101 2026-04-16T19:22:52.688 INFO:tasks.vip:192.168.123.101 in 192.168.123.0/24, pos 100 2026-04-16T19:22:52.689 INFO:tasks.vip:vm01.local static 12.12.0.101, vnet 12.12.0.0/22 2026-04-16T19:22:52.689 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.101')] 2026-04-16T19:22:52.689 DEBUG:teuthology.orchestra.run.vm01:> sudo ip route ls 2026-04-16T19:22:52.698 INFO:teuthology.orchestra.run.vm01.stdout:default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.101 metric 100 2026-04-16T19:22:52.698 INFO:teuthology.orchestra.run.vm01.stdout:172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-04-16T19:22:52.698 INFO:teuthology.orchestra.run.vm01.stdout:192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.101 metric 100 2026-04-16T19:22:52.698 INFO:teuthology.orchestra.run.vm01.stdout:192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.101 metric 100 2026-04-16T19:22:52.698 INFO:tasks.vip:Configuring 12.12.0.101 on vm01.local iface ens3... 
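The configure step that follows attaches the static address with `ip addr add 12.12.0.101/22 dev ens3`; the device comes from the `ip route ls` listing just above, which names ens3 as the interface carrying the peer address. A sketch of that derivation, assuming a simple token scan (the vip task's real parser may differ):

    import subprocess

    def iface_for(peer: str) -> str:
        # Scan `ip route ls` for a route whose src is the peer address and
        # return the device named after "dev" -- ens3 in the listing above.
        routes = subprocess.check_output(["ip", "route", "ls"], text=True)
        for line in routes.splitlines():
            t = line.split()
            if "src" in t and "dev" in t and t[t.index("src") + 1] == peer:
                return t[t.index("dev") + 1]
        raise RuntimeError(f"no interface carries {peer}")

    def add_static(peer: str, static: str, prefixlen: int = 22) -> None:
        # Mirrors the command logged for each host below.
        dev = iface_for(peer)
        subprocess.check_call(
            ["sudo", "ip", "addr", "add", f"{static}/{prefixlen}", "dev", dev])

    # add_static("192.168.123.101", "12.12.0.101")
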
2026-04-16T19:22:52.699 DEBUG:teuthology.orchestra.run.vm01:> sudo ip addr add 12.12.0.101/22 dev ens3 2026-04-16T19:22:52.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:52 vm04 bash[34817]: audit 2026-04-16T19:22:51.182045+0000 mgr.vm01.nwhpas (mgr.14227) 102 : audit [DBG] from='client.24309 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:52.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:52 vm04 bash[34817]: audit 2026-04-16T19:22:51.182045+0000 mgr.vm01.nwhpas (mgr.14227) 102 : audit [DBG] from='client.24309 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:52.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:52 vm04 bash[34817]: cluster 2026-04-16T19:22:51.268539+0000 mgr.vm01.nwhpas (mgr.14227) 103 : cluster [DBG] pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:52.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:52 vm04 bash[34817]: cluster 2026-04-16T19:22:51.268539+0000 mgr.vm01.nwhpas (mgr.14227) 103 : cluster [DBG] pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:52.752 INFO:tasks.vip:peername 192.168.123.104 2026-04-16T19:22:52.752 INFO:tasks.vip:192.168.123.104 in 192.168.123.0/24, pos 103 2026-04-16T19:22:52.753 INFO:tasks.vip:vm04.local static 12.12.0.104, vnet 12.12.0.0/22 2026-04-16T19:22:52.753 DEBUG:teuthology.orchestra.run.vm04:> sudo ip route ls 2026-04-16T19:22:52.760 INFO:teuthology.orchestra.run.vm04.stdout:default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.104 metric 100 2026-04-16T19:22:52.760 INFO:teuthology.orchestra.run.vm04.stdout:172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-04-16T19:22:52.760 INFO:teuthology.orchestra.run.vm04.stdout:192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.104 metric 100 2026-04-16T19:22:52.760 INFO:teuthology.orchestra.run.vm04.stdout:192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.104 metric 100 2026-04-16T19:22:52.761 INFO:tasks.vip:Configuring 12.12.0.104 on vm04.local iface ens3... 2026-04-16T19:22:52.761 DEBUG:teuthology.orchestra.run.vm04:> sudo ip addr add 12.12.0.104/22 dev ens3 2026-04-16T19:22:52.807 INFO:teuthology.run_tasks:Running task cephadm.shell... 
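With vm04 handled the same way (pos 103, static 12.12.0.104), the vip task is done. The allocations above follow a simple offset rule: a host's position inside its physical /24, counted from the first usable address, is reused inside the virtual 12.12.0.0/22, and the single VIP, 12.12.1.101, takes the same offset in the next /24 block. A sketch of that arithmetic, assuming this rule holds in general (the authoritative logic lives in the suite's tasks/vip.py):

    import ipaddress

    PHYS = ipaddress.ip_network("192.168.123.0/24")  # physical subnet
    VNET = ipaddress.ip_network("12.12.0.0/22")      # virtual net from the log

    def static_and_vip(peer: str):
        # Position within the /24, counted from the first usable address:
        # 192.168.123.101 -> pos 100, matching the log output above.
        pos = int(ipaddress.ip_address(peer)) - int(PHYS.network_address) - 1
        base = int(VNET.network_address)
        static = ipaddress.ip_address(base + pos + 1)     # 12.12.0.101
        vip = ipaddress.ip_address(base + 256 + pos + 1)  # 12.12.1.101
        return static, vip

    # Only one VIP is allocated in this run (12.12.1.101, for vm01); the
    # second value shows where a per-host VIP would land under this rule.
    for peer in ("192.168.123.101", "192.168.123.104"):
        print(peer, "->", static_and_vip(peer))
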
2026-04-16T19:22:52.809 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-04-16T19:22:52.809 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch device ls --refresh' 2026-04-16T19:22:53.076 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:53.465 INFO:teuthology.orchestra.run.vm01.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-16T19:22:53.465 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme0n1 ssd Linux_f0ab57676a82334da227 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.465 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme1n1 ssd Linux_498a3ba4a5f7d3afa78b 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.465 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme2n1 ssd Linux_018d58381178c5541924 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.465 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme3n1 ssd Linux_cb1f69ea567d81122781 19.9G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 15s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 15s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme0n1 ssd Linux_29b2bfa9f2fa89bb2491 19.9G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme1n1 ssd Linux_61c157a7b2d028903b27 19.9G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme2n1 ssd Linux_c8bfb12ab4da82fe6601 19.9G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme3n1 ssd Linux_5c94f58d64ed1622992e 19.9G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdb hdd DWNBRSTVMM04001 20.0G No 16s ago 
Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdc hdd DWNBRSTVMM04002 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdd hdd DWNBRSTVMM04003 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.466 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vde hdd DWNBRSTVMM04004 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-16T19:22:53.613 INFO:teuthology.run_tasks:Running task cephadm.apply... 2026-04-16T19:22:53.632 INFO:tasks.cephadm:Applying spec(s): placement: count: 4 host_pattern: '*' service_id: foo service_type: rgw spec: rgw_frontend_port: 8000 --- placement: count: 2 service_id: rgw.foo service_type: ingress spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: 12.12.1.101/22 2026-04-16T19:22:53.641 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch apply -i - 2026-04-16T19:22:53.956 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:53 vm04 bash[34817]: audit 2026-04-16T19:22:51.914254+0000 mgr.vm01.nwhpas (mgr.14227) 104 : audit [DBG] from='client.14544 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:53 vm04 bash[34817]: audit 2026-04-16T19:22:51.914254+0000 mgr.vm01.nwhpas (mgr.14227) 104 : audit [DBG] from='client.14544 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:53 vm04 bash[34817]: audit 2026-04-16T19:22:52.559374+0000 mon.vm01 (mon.0) 662 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:53 vm04 bash[34817]: audit 2026-04-16T19:22:52.559374+0000 mon.vm01 (mon.0) 662 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:53 vm04 bash[34817]: audit 2026-04-16T19:22:52.605424+0000 mgr.vm01.nwhpas (mgr.14227) 105 : audit [DBG] from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:53 vm04 bash[34817]: audit 2026-04-16T19:22:52.605424+0000 mgr.vm01.nwhpas (mgr.14227) 105 : audit [DBG] from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:53 vm01 bash[28222]: audit 2026-04-16T19:22:51.914254+0000 mgr.vm01.nwhpas (mgr.14227) 104 : audit [DBG] from='client.14544 -' 
entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:53 vm01 bash[28222]: audit 2026-04-16T19:22:51.914254+0000 mgr.vm01.nwhpas (mgr.14227) 104 : audit [DBG] from='client.14544 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:53 vm01 bash[28222]: audit 2026-04-16T19:22:52.559374+0000 mon.vm01 (mon.0) 662 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:53 vm01 bash[28222]: audit 2026-04-16T19:22:52.559374+0000 mon.vm01 (mon.0) 662 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:53 vm01 bash[28222]: audit 2026-04-16T19:22:52.605424+0000 mgr.vm01.nwhpas (mgr.14227) 105 : audit [DBG] from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:53.966 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:53 vm01 bash[28222]: audit 2026-04-16T19:22:52.605424+0000 mgr.vm01.nwhpas (mgr.14227) 105 : audit [DBG] from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:54.416 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled rgw.foo update... 2026-04-16T19:22:54.416 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled ingress.rgw.foo update... 2026-04-16T19:22:54.486 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-16T19:22:54.488 INFO:tasks.cephadm:Waiting for ceph service rgw.foo to start (timeout 300)... 
2026-04-16T19:22:54.489 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: cluster 2026-04-16T19:22:53.268797+0000 mgr.vm01.nwhpas (mgr.14227) 106 : cluster [DBG] pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: cluster 2026-04-16T19:22:53.268797+0000 mgr.vm01.nwhpas (mgr.14227) 106 : cluster [DBG] pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:53.463044+0000 mgr.vm01.nwhpas (mgr.14227) 107 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:53.463044+0000 mgr.vm01.nwhpas (mgr.14227) 107 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:53.472753+0000 mon.vm01 (mon.0) 663 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:53.472753+0000 mon.vm01 (mon.0) 663 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:54.409813+0000 mon.vm01 (mon.0) 664 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:54.409813+0000 mon.vm01 (mon.0) 664 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:54.415190+0000 mon.vm01 (mon.0) 665 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:54 vm01 bash[28222]: audit 2026-04-16T19:22:54.415190+0000 mon.vm01 (mon.0) 665 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.759 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: cluster 2026-04-16T19:22:53.268797+0000 mgr.vm01.nwhpas (mgr.14227) 106 : cluster [DBG] pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:54.956 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: cluster 2026-04-16T19:22:53.268797+0000 mgr.vm01.nwhpas (mgr.14227) 106 : cluster [DBG] pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:53.463044+0000 mgr.vm01.nwhpas (mgr.14227) 107 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:53.463044+0000 mgr.vm01.nwhpas (mgr.14227) 107 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:53.472753+0000 mon.vm01 (mon.0) 663 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:53.472753+0000 mon.vm01 (mon.0) 663 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:54.409813+0000 mon.vm01 (mon.0) 664 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:54.409813+0000 mon.vm01 (mon.0) 664 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:54.415190+0000 mon.vm01 (mon.0) 665 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:54 vm04 bash[34817]: audit 2026-04-16T19:22:54.415190+0000 mon.vm01 (mon.0) 665 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:22:55.157 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:55.158 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:35.815288Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:34.785191Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:34.785482Z", "running": 2, "size": 2}}, {"placement": 
{"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:35.815350Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:34.785375Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:34.785284Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:34.785100Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:34.785153Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:35.815506Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.410064Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:22:54.402487Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-16T19:22:55.224 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:22:55.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:55 vm04 bash[34817]: audit 2026-04-16T19:22:54.400206+0000 mgr.vm01.nwhpas (mgr.14227) 108 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:55.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:55 vm04 bash[34817]: audit 2026-04-16T19:22:54.400206+0000 mgr.vm01.nwhpas (mgr.14227) 108 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": 
["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:55.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:55 vm04 bash[34817]: cephadm 2026-04-16T19:22:54.402475+0000 mgr.vm01.nwhpas (mgr.14227) 109 : cephadm [INF] Saving service rgw.foo spec with placement count:4;* 2026-04-16T19:22:55.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:55 vm04 bash[34817]: cephadm 2026-04-16T19:22:54.402475+0000 mgr.vm01.nwhpas (mgr.14227) 109 : cephadm [INF] Saving service rgw.foo spec with placement count:4;* 2026-04-16T19:22:55.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:55 vm04 bash[34817]: cephadm 2026-04-16T19:22:54.410220+0000 mgr.vm01.nwhpas (mgr.14227) 110 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2 2026-04-16T19:22:55.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:55 vm04 bash[34817]: cephadm 2026-04-16T19:22:54.410220+0000 mgr.vm01.nwhpas (mgr.14227) 110 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2 2026-04-16T19:22:55.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:55 vm01 bash[28222]: audit 2026-04-16T19:22:54.400206+0000 mgr.vm01.nwhpas (mgr.14227) 108 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:55.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:55 vm01 bash[28222]: audit 2026-04-16T19:22:54.400206+0000 mgr.vm01.nwhpas (mgr.14227) 108 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:22:55.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:55 vm01 bash[28222]: cephadm 2026-04-16T19:22:54.402475+0000 mgr.vm01.nwhpas (mgr.14227) 109 : cephadm [INF] Saving service rgw.foo spec with placement count:4;* 2026-04-16T19:22:55.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:55 vm01 bash[28222]: cephadm 2026-04-16T19:22:54.402475+0000 mgr.vm01.nwhpas (mgr.14227) 109 : cephadm [INF] Saving service rgw.foo spec with placement count:4;* 2026-04-16T19:22:55.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:55 vm01 bash[28222]: cephadm 2026-04-16T19:22:54.410220+0000 mgr.vm01.nwhpas (mgr.14227) 110 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2 2026-04-16T19:22:55.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:55 vm01 bash[28222]: cephadm 2026-04-16T19:22:54.410220+0000 mgr.vm01.nwhpas (mgr.14227) 110 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2 2026-04-16T19:22:56.224 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:22:56.511 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:56.911 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:22:56.911 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:35.815288Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, 
"service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:34.785191Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:34.785482Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:35.815350Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:34.785375Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:34.785284Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:34.785100Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:34.785153Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:35.815506Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.410064Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:22:54.402487Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-16T19:22:56.924 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:56 vm01 bash[28222]: audit 2026-04-16T19:22:55.155258+0000 mgr.vm01.nwhpas (mgr.14227) 111 : audit [DBG] from='client.14560 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:56.924 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:56 vm01 bash[28222]: audit 2026-04-16T19:22:55.155258+0000 mgr.vm01.nwhpas (mgr.14227) 111 : audit [DBG] from='client.14560 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:56.924 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:56 vm01 bash[28222]: cluster 2026-04-16T19:22:55.269108+0000 mgr.vm01.nwhpas (mgr.14227) 112 : cluster [DBG] pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:56.924 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:56 vm01 bash[28222]: cluster 2026-04-16T19:22:55.269108+0000 mgr.vm01.nwhpas (mgr.14227) 112 : cluster [DBG] pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:56.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:56 vm04 bash[34817]: audit 2026-04-16T19:22:55.155258+0000 mgr.vm01.nwhpas (mgr.14227) 111 : audit [DBG] from='client.14560 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:56.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:56 vm04 bash[34817]: audit 2026-04-16T19:22:55.155258+0000 mgr.vm01.nwhpas (mgr.14227) 111 : audit [DBG] from='client.14560 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:56.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:56 vm04 bash[34817]: cluster 2026-04-16T19:22:55.269108+0000 mgr.vm01.nwhpas (mgr.14227) 112 : cluster [DBG] pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:56.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:56 vm04 bash[34817]: cluster 2026-04-16T19:22:55.269108+0000 mgr.vm01.nwhpas (mgr.14227) 112 : cluster [DBG] pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:22:56.992 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:22:57.993 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:22:58.294 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:22:58.716 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:58 vm01 bash[28222]: audit 2026-04-16T19:22:56.908131+0000 mgr.vm01.nwhpas (mgr.14227) 113 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:58.716 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:58 vm01 bash[28222]: audit 2026-04-16T19:22:56.908131+0000 mgr.vm01.nwhpas (mgr.14227) 113 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:22:58.716 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:58 vm01 
bash[28222]: cluster 2026-04-16T19:22:57.269467+0000 mgr.vm01.nwhpas (mgr.14227) 114 : cluster [DBG] pgmap v52: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:22:58.717 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:58 vm01 bash[28222]: audit 2026-04-16T19:22:58.191911+0000 mon.vm01 (mon.0) 666 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:58.717 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:58 vm01 bash[28222]: audit 2026-04-16T19:22:58.197837+0000 mon.vm01 (mon.0) 667 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:58.722 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:22:58.722 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:35.815288Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:35.815159Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:35.815119Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:35.815350Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:35.815247Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:35.815073Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:35.815463Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:35.814953Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:35.815506Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.410064Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:22:54.402487Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-16T19:22:58.826 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:22:58.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:58 vm04 bash[34817]: audit 2026-04-16T19:22:56.908131+0000 mgr.vm01.nwhpas (mgr.14227) 113 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:22:58.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:58 vm04 bash[34817]: cluster 2026-04-16T19:22:57.269467+0000 mgr.vm01.nwhpas (mgr.14227) 114 : cluster [DBG] pgmap v52: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:22:58.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:58 vm04 bash[34817]: audit 2026-04-16T19:22:58.191911+0000 mon.vm01 (mon.0) 666 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:58.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:58 vm04 bash[34817]: audit 2026-04-16T19:22:58.197837+0000 mon.vm01 (mon.0) 667 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:22:59.827 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:00.139 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:58.720127+0000 mgr.vm01.nwhpas (mgr.14227) 115 : audit [DBG] from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:58.792194+0000 mon.vm01 (mon.0) 668 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:58.801019+0000 mon.vm01 (mon.0) 669 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:58.909369+0000 mon.vm01 (mon.0) 670 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:58.915497+0000 mon.vm01 (mon.0) 671 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:59.502025+0000 mon.vm01 (mon.0) 672 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.195 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:22:59 vm01 bash[28222]: audit 2026-04-16T19:22:59.508027+0000 mon.vm01 (mon.0) 673 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:58.720127+0000 mgr.vm01.nwhpas (mgr.14227) 115 : audit [DBG] from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:58.792194+0000 mon.vm01 (mon.0) 668 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:58.801019+0000 mon.vm01 (mon.0) 669 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:58.909369+0000 mon.vm01 (mon.0) 670 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:58.915497+0000 mon.vm01 (mon.0) 671 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:59.502025+0000 mon.vm01 (mon.0) 672 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:22:59 vm04 bash[34817]: audit 2026-04-16T19:22:59.508027+0000 mon.vm01 (mon.0) 673 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:00.547 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:00.547 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.410064Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:22:54.402487Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-16T19:23:00.646 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:00.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:00 vm01 bash[28222]: cluster 2026-04-16T19:22:59.269842+0000 mgr.vm01.nwhpas (mgr.14227) 116 : cluster [DBG] pgmap v53: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:01.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:00 vm04 bash[34817]: cluster 2026-04-16T19:22:59.269842+0000 mgr.vm01.nwhpas (mgr.14227) 116 : cluster [DBG] pgmap v53: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:01.647 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:01.934 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:00.543629+0000 mgr.vm01.nwhpas (mgr.14227) 117 : audit [DBG] from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:00.845538+0000 mon.vm01 (mon.0) 674 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:00.850752+0000 mon.vm01 (mon.0) 675 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.479824+0000 mon.vm01 (mon.0) 676 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.485063+0000 mon.vm01 (mon.0) 677 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.485804+0000 mon.vm01 (mon.0) 678 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.486274+0000 mon.vm01 (mon.0) 679 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:01.953 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.489739+0000 mon.vm01 (mon.0) 680 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:01.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.491017+0000 mon.vm01 (mon.0) 681 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:01.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.494279+0000 mon.vm01 (mon.0) 682 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.uxumrv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:01.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.496225+0000 mon.vm01 (mon.0) 683 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.uxumrv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:01.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.499992+0000 mon.vm01 (mon.0) 684 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:01.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:01 vm01 bash[28222]: audit 2026-04-16T19:23:01.501125+0000 mon.vm01 (mon.0) 685 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:00.543629+0000 mgr.vm01.nwhpas (mgr.14227) 117 : audit [DBG] from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:00.845538+0000 mon.vm01 (mon.0) 674 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:00.850752+0000 mon.vm01 (mon.0) 675 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.479824+0000 mon.vm01 (mon.0) 676 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.485063+0000 mon.vm01 (mon.0) 677 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.485804+0000 mon.vm01 (mon.0) 678 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.486274+0000 mon.vm01 (mon.0) 679 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.489739+0000 mon.vm01 (mon.0) 680 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.491017+0000 mon.vm01 (mon.0) 681 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.494279+0000 mon.vm01 (mon.0) 682 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.uxumrv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.496225+0000 mon.vm01 (mon.0) 683 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.uxumrv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.499992+0000 mon.vm01 (mon.0) 684 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.052 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:01 vm04 bash[34817]: audit 2026-04-16T19:23:01.501125+0000 mon.vm01 (mon.0) 685 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:02.324 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:02.325 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.410064Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:22:54.402487Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-16T19:23:02.361 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:02.394 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:02.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: cluster 2026-04-16T19:23:01.270184+0000 mgr.vm01.nwhpas (mgr.14227) 118 : cluster [DBG] pgmap v54: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:02.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: cephadm 2026-04-16T19:23:01.501606+0000 mgr.vm01.nwhpas (mgr.14227) 119 : cephadm [INF] Deploying daemon rgw.foo.vm04.uxumrv on vm04
2026-04-16T19:23:02.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.398801+0000 mon.vm01 (mon.0) 686 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.403197+0000 mon.vm01 (mon.0) 687 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.406286+0000 mon.vm01 (mon.0) 688 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.406751+0000 mon.vm01 (mon.0) 689 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.pktgwy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:02.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.408686+0000 mon.vm01 (mon.0) 690 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.pktgwy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:02.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.413971+0000 mon.vm01 (mon.0) 691 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:02.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 bash[28222]: audit 2026-04-16T19:23:02.415049+0000 mon.vm01 (mon.0) 692 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: cluster 2026-04-16T19:23:01.270184+0000 mgr.vm01.nwhpas (mgr.14227) 118 : cluster [DBG] pgmap v54: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: cephadm 2026-04-16T19:23:01.501606+0000 mgr.vm01.nwhpas (mgr.14227) 119 : cephadm [INF] Deploying daemon rgw.foo.vm04.uxumrv on vm04
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.398801+0000 mon.vm01 (mon.0) 686 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.403197+0000 mon.vm01 (mon.0) 687 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.406286+0000 mon.vm01 (mon.0) 688 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.406751+0000 mon.vm01 (mon.0) 689 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.pktgwy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.408686+0000 mon.vm01 (mon.0) 690 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.pktgwy", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.413971+0000 mon.vm01 (mon.0) 691 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:02 vm04 bash[34817]: audit 2026-04-16T19:23:02.415049+0000 mon.vm01 (mon.0) 692 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:03.225 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:02 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:03.225 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:03.395 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:03.663 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:03.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:02.321401+0000 mgr.vm01.nwhpas (mgr.14227) 120 : audit [DBG] from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:03.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: cephadm 2026-04-16T19:23:02.415584+0000 mgr.vm01.nwhpas (mgr.14227) 121 : cephadm [INF] Deploying daemon rgw.foo.vm01.pktgwy on vm01
2026-04-16T19:23:03.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.353259+0000 mon.vm01 (mon.0) 693 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.358728+0000 mon.vm01 (mon.0) 694 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.365401+0000 mon.vm01 (mon.0) 695 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.365916+0000 mon.vm01 (mon.0) 696 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.rpimxa", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.368706+0000 mon.vm01 (mon.0) 697 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.rpimxa", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.375562+0000 mon.vm01 (mon.0) 698 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.377572+0000 mon.vm01 (mon.0) 699 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: cluster 2026-04-16T19:23:03.420735+0000 mon.vm01 (mon.0) 700 : cluster [DBG] osdmap e25: 8 total, 8 up, 8 in
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.422067+0000 mon.vm04 (mon.1) 26 : audit [INF] from='client.? 192.168.123.104:0/947338226' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-16T19:23:03.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:03 vm04 bash[34817]: audit 2026-04-16T19:23:03.423186+0000 mon.vm01 (mon.0) 701 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:02.321401+0000 mgr.vm01.nwhpas (mgr.14227) 120 : audit [DBG] from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: cephadm 2026-04-16T19:23:02.415584+0000 mgr.vm01.nwhpas (mgr.14227) 121 : cephadm [INF] Deploying daemon rgw.foo.vm01.pktgwy on vm01
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.353259+0000 mon.vm01 (mon.0) 693 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.358728+0000 mon.vm01 (mon.0) 694 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.365401+0000 mon.vm01 (mon.0) 695 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
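The audit entries above show the keyring material cephadm creates for each new radosgw daemon: a client.rgw.foo.<host>.<id> entity with blanket mon caps, rw on the mgr, and osd access restricted to pools tagged for rgw. A manual equivalent of the dispatched command, copied from the cmd JSON above (the entity name is the one cephadm generated for this particular run):

    # Manual equivalent of the keyring creation cephadm dispatched above.
    ceph auth get-or-create client.rgw.foo.vm04.uxumrv \
      mon 'allow *' mgr 'allow rw' osd 'allow rwx tag rgw *=*'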
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.365916+0000 mon.vm01 (mon.0) 696 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.rpimxa", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.368706+0000 mon.vm01 (mon.0) 697 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.rpimxa", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.375562+0000 mon.vm01 (mon.0) 698 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.377572+0000 mon.vm01 (mon.0) 699 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:03.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: cluster 2026-04-16T19:23:03.420735+0000 mon.vm01 (mon.0) 700 : cluster [DBG] osdmap e25: 8 total, 8 up, 8 in
2026-04-16T19:23:03.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.422067+0000 mon.vm04 (mon.1) 26 : audit [INF] from='client.? 192.168.123.104:0/947338226' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-16T19:23:03.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:03 vm01 bash[28222]: audit 2026-04-16T19:23:03.423186+0000 mon.vm01 (mon.0) 701 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-16T19:23:04.086 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:04.086 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:03.365583Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:22:54.402487Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-16T19:23:04.167 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:04.258 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:04.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:05.033 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: cluster 2026-04-16T19:23:03.270614+0000 mgr.vm01.nwhpas (mgr.14227) 122 : cluster [DBG] pgmap v55: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: cephadm 2026-04-16T19:23:03.378286+0000 mgr.vm01.nwhpas (mgr.14227) 123 : cephadm [INF] Deploying daemon rgw.foo.vm04.rpimxa on vm04
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.082399+0000 mon.vm01 (mon.0) 702 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.083824+0000 mon.vm01 (mon.0) 703 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.381598+0000 mon.vm01 (mon.0) 704 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.387253+0000 mon.vm01 (mon.0) 705 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.390321+0000 mon.vm01 (mon.0) 706 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.390862+0000 mon.vm01 (mon.0) 707 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.qgurbb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.392838+0000 mon.vm01 (mon.0) 708 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.qgurbb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.396120+0000 mon.vm01 (mon.0) 709 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.397226+0000 mon.vm01 (mon.0) 710 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: audit 2026-04-16T19:23:04.420414+0000 mon.vm01 (mon.0) 711 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-04-16T19:23:05.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:04 vm01 bash[28222]: cluster 2026-04-16T19:23:04.422619+0000 mon.vm01 (mon.0) 712 : cluster [DBG] osdmap e26: 8 total, 8 up, 8 in
2026-04-16T19:23:05.168 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:05.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: cluster 2026-04-16T19:23:03.270614+0000 mgr.vm01.nwhpas (mgr.14227) 122 : cluster [DBG] pgmap v55: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:05.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: cephadm 2026-04-16T19:23:03.378286+0000 mgr.vm01.nwhpas (mgr.14227) 123 : cephadm [INF] Deploying daemon rgw.foo.vm04.rpimxa on vm04
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.082399+0000 mon.vm01 (mon.0) 702 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.083824+0000 mon.vm01 (mon.0) 703 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.381598+0000 mon.vm01 (mon.0) 704 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.387253+0000 mon.vm01 (mon.0) 705 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.390321+0000 mon.vm01 (mon.0) 706 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.390862+0000 mon.vm01 (mon.0) 707 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.qgurbb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.392838+0000 mon.vm01 (mon.0) 708 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm01.qgurbb", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.396120+0000 mon.vm01 (mon.0) 709 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.397226+0000 mon.vm01 (mon.0) 710 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: audit 2026-04-16T19:23:04.420414+0000 mon.vm01 (mon.0) 711 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-04-16T19:23:05.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:04 vm04 bash[34817]: cluster 2026-04-16T19:23:04.422619+0000 mon.vm01 (mon.0) 712 : cluster [DBG] osdmap e26: 8 total, 8 up, 8 in
2026-04-16T19:23:05.288 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:05.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
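Systemd noise aside, the audit records above all follow one shape: a JSON mon command (the cmd={...} payload) is dispatched to the quorum and later reported finished. The RGW daemons' "osd pool application enable" calls and the mgr's "auth get-or-create" both arrive this way, and any librados client can issue the same payloads. A hedged sketch using the standard Python rados binding, reusing the conf and keyring paths from the cephadm shell invocation above and the payload from the audit lines:

    import json
    import rados

    # Connect as client.admin with the same conf/keyring the cephadm shell uses.
    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf",
                          conf={"keyring": "/etc/ceph/ceph.client.admin.keyring"})
    cluster.connect()
    try:
        # Identical payload to the audited command from client.rgw.foo.vm04.uxumrv;
        # mon_command() takes the JSON-encoded dict plus an input buffer.
        cmd = json.dumps({"prefix": "osd pool application enable",
                          "pool": ".rgw.root", "app": "rgw"})
        ret, outbuf, outs = cluster.mon_command(cmd, b"")
        print(ret, outs)  # ret == 0 on success; the CLI equivalent is
                          # "ceph osd pool application enable .rgw.root rgw"
    finally:
        cluster.shutdown()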
2026-04-16T19:23:05.471 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:05.932 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:05.932 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:06.011 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:06.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:04.081592+0000 mgr.vm01.nwhpas (mgr.14227) 124 : audit [DBG] from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:06.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: cephadm 2026-04-16T19:23:04.397850+0000 mgr.vm01.nwhpas (mgr.14227) 125 : cephadm [INF] Deploying daemon rgw.foo.vm01.qgurbb on vm01
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: cluster 2026-04-16T19:23:05.270909+0000 mgr.vm01.nwhpas (mgr.14227) 126 : cluster [DBG] pgmap v58: 33 pgs: 15 creating+peering, 14 unknown, 4 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: cluster 2026-04-16T19:23:05.426420+0000 mon.vm01 (mon.0) 713 : cluster [DBG] osdmap e27: 8 total, 8 up, 8 in
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.429138+0000 mon.vm01 (mon.0) 714 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.431718+0000 mon.vm04 (mon.1) 27 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.431830+0000 mon.vm04 (mon.1) 28 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.439627+0000 mon.vm01 (mon.0) 715 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.439768+0000 mon.vm01 (mon.0) 716 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.451939+0000 mon.vm01 (mon.0) 717 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.464036+0000 mon.vm01 (mon.0) 718 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.479814+0000 mon.vm01 (mon.0) 719 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: cephadm 2026-04-16T19:23:05.480480+0000 mgr.vm01.nwhpas (mgr.14227) 127 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.484233+0000 mon.vm01 (mon.0) 720 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.491798+0000 mon.vm01 (mon.0) 721 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: audit 2026-04-16T19:23:05.497257+0000 mon.vm01 (mon.0) 722 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
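The mgr line "Saving service rgw.foo spec with placement count:4;*" is cephadm's shorthand for the placement that the orch ls dumps render as {"count": 4, "host_pattern": "*"}. A toy decoder for just the two fields seen in this run (cephadm's own PlacementSpec parsing is more general; this sketch only illustrates the mapping):

    def parse_placement(shorthand: str) -> dict:
        # "count:4;*" -> {"count": 4, "host_pattern": "*"}
        placement = {}
        for part in shorthand.split(";"):
            if part.startswith("count:"):
                placement["count"] = int(part.split(":", 1)[1])
            elif part:
                placement["host_pattern"] = part
        return placement

    print(parse_placement("count:4;*"))  # -> {'count': 4, 'host_pattern': '*'}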
2026-04-16T19:23:06.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:05 vm04 bash[34817]: cephadm 2026-04-16T19:23:05.503051+0000 mgr.vm01.nwhpas (mgr.14227) 128 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm04.bfwsbq on vm04
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:04.081592+0000 mgr.vm01.nwhpas (mgr.14227) 124 : audit [DBG] from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: cephadm 2026-04-16T19:23:04.397850+0000 mgr.vm01.nwhpas (mgr.14227) 125 : cephadm [INF] Deploying daemon rgw.foo.vm01.qgurbb on vm01
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: cluster 2026-04-16T19:23:05.270909+0000 mgr.vm01.nwhpas (mgr.14227) 126 : cluster [DBG] pgmap v58: 33 pgs: 15 creating+peering, 14 unknown, 4 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: cluster 2026-04-16T19:23:05.426420+0000 mon.vm01 (mon.0) 713 : cluster [DBG] osdmap e27: 8 total, 8 up, 8 in
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.429138+0000 mon.vm01 (mon.0) 714 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.431718+0000 mon.vm04 (mon.1) 27 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.431830+0000 mon.vm04 (mon.1) 28 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.439627+0000 mon.vm01 (mon.0) 715 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.439768+0000 mon.vm01 (mon.0) 716 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.451939+0000 mon.vm01 (mon.0) 717 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.464036+0000 mon.vm01 (mon.0) 718 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.479814+0000 mon.vm01 (mon.0) 719 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: cephadm 2026-04-16T19:23:05.480480+0000 mgr.vm01.nwhpas (mgr.14227) 127 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-04-16T19:23:06.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.484233+0000 mon.vm01 (mon.0) 720 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.491798+0000 mon.vm01 (mon.0) 721 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:06.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: audit 2026-04-16T19:23:05.497257+0000 mon.vm01 (mon.0) 722 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
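With the rgw spec saved and the first haproxy being deployed, the harness simply keeps re-running the cephadm shell ... ceph orch ls -f json command seen in the DEBUG lines (the poll timestamps above are roughly two seconds apart) until "running" reaches "size" for rgw.foo. A minimal sketch of such a wait loop, assuming the exact command line from the log; the real cephadm.wait_for_service task additionally enforces an overall timeout:

    import json
    import subprocess
    import time

    # Command line copied from the DEBUG:teuthology.orchestra.run lines above.
    ORCH_LS = ("sudo /home/ubuntu/cephtest/cephadm"
               " --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7"
               " shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring"
               " --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json")

    def wait_for_service(name: str, interval: float = 2.0) -> None:
        # Poll until every expected daemon of the service reports running.
        while True:
            out = subprocess.check_output(ORCH_LS.split(), text=True)
            svc = next(s for s in json.loads(out) if s["service_name"] == name)
            running = svc["status"].get("running", 0)
            size = svc["status"].get("size", 0)
            print(f"{name} has {running}/{size}")  # matches the tasks.cephadm lines
            if size > 0 and running == size:
                return
            time.sleep(interval)

    wait_for_service("rgw.foo")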
2026-04-16T19:23:06.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:05 vm01 bash[28222]: cephadm 2026-04-16T19:23:05.503051+0000 mgr.vm01.nwhpas (mgr.14227) 128 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm04.bfwsbq on vm04
2026-04-16T19:23:07.012 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:07.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:05.926600+0000 mgr.vm01.nwhpas (mgr.14227) 129 : audit [DBG] from='client.14610 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:07.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:05.927482+0000 mon.vm01 (mon.0) 723 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:05.928290+0000 mon.vm01 (mon.0) 724 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:05.928806+0000 mon.vm01 (mon.0) 725 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:05.929319+0000 mon.vm01 (mon.0) 726 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:06.427705+0000 mon.vm01 (mon.0) 727 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-16T19:23:07.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:06.427765+0000 mon.vm01 (mon.0) 728 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-16T19:23:07.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: audit 2026-04-16T19:23:06.427794+0000 mon.vm01 (mon.0) 729 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-16T19:23:07.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:06 vm04 bash[34817]: cluster 2026-04-16T19:23:06.430522+0000 mon.vm01 (mon.0) 730 : cluster [DBG] osdmap e28: 8 total, 8 up, 8 in
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:05.926600+0000 mgr.vm01.nwhpas (mgr.14227) 129 : audit [DBG] from='client.14610 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:05.927482+0000 mon.vm01 (mon.0) 723 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:05.928290+0000 mon.vm01 (mon.0) 724 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:05.928806+0000 mon.vm01 (mon.0) 725 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:05.929319+0000 mon.vm01 (mon.0) 726 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:06.427705+0000 mon.vm01 (mon.0) 727 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:06.427765+0000 mon.vm01 (mon.0) 728 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: audit 2026-04-16T19:23:06.427794+0000 mon.vm01 (mon.0) 729 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-16T19:23:07.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:06 vm01 bash[28222]: cluster 2026-04-16T19:23:06.430522+0000 mon.vm01 (mon.0) 730 : cluster [DBG] osdmap e28: 8 total, 8 up, 8 in
2026-04-16T19:23:07.284 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:07.946 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:07.946 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:22:54.415340Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:08.020 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:08.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: cluster 2026-04-16T19:23:07.271225+0000 mgr.vm01.nwhpas (mgr.14227) 130 : cluster [DBG] pgmap v61: 65 pgs: 26 creating+peering, 29 unknown, 10 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s wr, 0 op/s
2026-04-16T19:23:08.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.559751+0000 mon.vm01 (mon.0) 731 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:23:08.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: cluster 2026-04-16T19:23:07.574832+0000 mon.vm01 (mon.0) 732 : cluster [DBG] osdmap e29: 8 total, 8 up, 8 in
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.576640+0000 mon.vm04 (mon.1) 29 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.576640+0000 mon.vm04 (mon.1) 29 : audit [INF] from='client.?
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.576783+0000 mon.vm04 (mon.1) 30 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.579124+0000 mon.vm01 (mon.0) 733 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.587507+0000 mon.vm01 (mon.0) 734 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.587631+0000 mon.vm01 (mon.0) 735 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:07 vm04 bash[34817]: audit 2026-04-16T19:23:07.587701+0000 mon.vm01 (mon.0) 736 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: cluster 2026-04-16T19:23:07.271225+0000 mgr.vm01.nwhpas (mgr.14227) 130 : cluster [DBG] pgmap v61: 65 pgs: 26 creating+peering, 29 unknown, 10 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 255 B/s wr, 0 op/s
2026-04-16T19:23:08.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.559751+0000 mon.vm01 (mon.0) 731 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:23:08.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: cluster 2026-04-16T19:23:07.574832+0000 mon.vm01 (mon.0) 732 : cluster [DBG] osdmap e29: 8 total, 8 up, 8 in
2026-04-16T19:23:08.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.576640+0000 mon.vm04 (mon.1) 29 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.576783+0000 mon.vm04 (mon.1) 30 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.579124+0000 mon.vm01 (mon.0) 733 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.587507+0000 mon.vm01 (mon.0) 734 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.587631+0000 mon.vm01 (mon.0) 735 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:08.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:07 vm01 bash[28222]: audit 2026-04-16T19:23:07.587701+0000 mon.vm01 (mon.0) 736 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-16T19:23:09.020 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:09.296 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:07.940331+0000 mgr.vm01.nwhpas (mgr.14227) 131 : audit [DBG] from='client.14614 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:07.941061+0000 mon.vm01 (mon.0) 737 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:07.941778+0000 mon.vm01 (mon.0) 738 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:07.942207+0000 mon.vm01 (mon.0) 739 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:07.942629+0000 mon.vm01 (mon.0) 740 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:08.579488+0000 mon.vm01 (mon.0) 741 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:08.579550+0000 mon.vm01 (mon.0) 742 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:08.579593+0000 mon.vm01 (mon.0) 743 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: audit 2026-04-16T19:23:08.579636+0000 mon.vm01 (mon.0) 744 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.301 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:08 vm04 bash[34817]: cluster 2026-04-16T19:23:08.582673+0000 mon.vm01 (mon.0) 745 : cluster [DBG] osdmap e30: 8 total, 8 up, 8 in
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:07.940331+0000 mgr.vm01.nwhpas (mgr.14227) 131 : audit [DBG] from='client.14614 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:07.941061+0000 mon.vm01 (mon.0) 737 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:07.941778+0000 mon.vm01 (mon.0) 738 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:07.942207+0000 mon.vm01 (mon.0) 739 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:07.942629+0000 mon.vm01 (mon.0) 740 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:08.579488+0000 mon.vm01 (mon.0) 741 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:08.579550+0000 mon.vm01 (mon.0) 742 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:08.579593+0000 mon.vm01 (mon.0) 743 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: audit 2026-04-16T19:23:08.579636+0000 mon.vm01 (mon.0) 744 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-16T19:23:09.317 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:08 vm01 bash[28222]: cluster 2026-04-16T19:23:08.582673+0000 mon.vm01 (mon.0) 745 : cluster [DBG] osdmap e30: 8 total, 8 up, 8 in
2026-04-16T19:23:09.551 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:09 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:09.767 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:09.768 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:09.696768Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:09.838 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:10.839 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:10.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: cluster 2026-04-16T19:23:09.271759+0000 mgr.vm01.nwhpas (mgr.14227) 132 : cluster [DBG] pgmap v64: 97 pgs: 19 creating+peering, 19 unknown, 59 active+clean; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 2.7 KiB/s wr, 16 op/s
2026-04-16T19:23:10.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: cluster 2026-04-16T19:23:09.586759+0000 mon.vm01 (mon.0) 746 : cluster [DBG] osdmap e31: 8 total, 8 up, 8 in
2026-04-16T19:23:10.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.588105+0000 mon.vm04 (mon.1) 31 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.588263+0000 mon.vm04 (mon.1) 32 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.589538+0000 mon.vm01 (mon.0) 747 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.589644+0000 mon.vm01 (mon.0) 748 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.589741+0000 mon.vm01 (mon.0) 749 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.596818+0000 mon.vm01 (mon.0) 750 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.684857+0000 mon.vm01 (mon.0) 751 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.691616+0000 mon.vm01 (mon.0) 752 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.696465+0000 mon.vm01 (mon.0) 753 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: cephadm 2026-04-16T19:23:09.698448+0000 mgr.vm01.nwhpas (mgr.14227) 133 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm01.fvwjhu on vm01
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.758952+0000 mgr.vm01.nwhpas (mgr.14227) 134 : audit [DBG] from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.759746+0000 mon.vm01 (mon.0) 754 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.760716+0000 mon.vm01 (mon.0) 755 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.761584+0000 mon.vm01 (mon.0) 756 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.762536+0000 mon.vm01 (mon.0) 757 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:10 vm04 bash[34817]: audit 2026-04-16T19:23:09.891814+0000 mon.vm01 (mon.0) 758 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: cluster 2026-04-16T19:23:09.271759+0000 mgr.vm01.nwhpas (mgr.14227) 132 : cluster [DBG] pgmap v64: 97 pgs: 19 creating+peering, 19 unknown, 59 active+clean; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 2.7 KiB/s wr, 16 op/s
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: cluster 2026-04-16T19:23:09.586759+0000 mon.vm01 (mon.0) 746 : cluster [DBG] osdmap e31: 8 total, 8 up, 8 in
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.588105+0000 mon.vm04 (mon.1) 31 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.588263+0000 mon.vm04 (mon.1) 32 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.589538+0000 mon.vm01 (mon.0) 747 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.589644+0000 mon.vm01 (mon.0) 748 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.589741+0000 mon.vm01 (mon.0) 749 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.596818+0000 mon.vm01 (mon.0) 750 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.684857+0000 mon.vm01 (mon.0) 751 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.691616+0000 mon.vm01 (mon.0) 752 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.696465+0000 mon.vm01 (mon.0) 753 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: cephadm 2026-04-16T19:23:09.698448+0000 mgr.vm01.nwhpas (mgr.14227) 133 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm01.fvwjhu on vm01
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.758952+0000 mgr.vm01.nwhpas (mgr.14227) 134 : audit [DBG] from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.759746+0000 mon.vm01 (mon.0) 754 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.760716+0000 mon.vm01 (mon.0) 755 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.761584+0000 mon.vm01 (mon.0) 756 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.762536+0000 mon.vm01 (mon.0) 757 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:10.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:10 vm01 bash[28222]: audit 2026-04-16T19:23:09.891814+0000 mon.vm01 (mon.0) 758 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:11.124 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config
2026-04-16T19:23:11.529 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:11.529 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:09.696768Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:11.620 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:11.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.588132+0000 mon.vm01 (mon.0) 759 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.588307+0000 mon.vm01 (mon.0) 760 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.588458+0000 mon.vm01 (mon.0) 761 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.588583+0000 mon.vm01 (mon.0) 762 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: cluster 2026-04-16T19:23:10.593202+0000 mon.vm01 (mon.0) 763 : cluster [DBG] osdmap e32: 8 total, 8 up, 8 in
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.593877+0000 mon.vm04 (mon.1) 33 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.599727+0000 mon.vm01 (mon.0) 764 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.603458+0000 mon.vm01 (mon.0) 765 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.604829+0000 mon.vm04 (mon.1) 34 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.605785+0000 mon.vm01 (mon.0) 766 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:10.616219+0000 mon.vm01 (mon.0) 767 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.524689+0000 mon.vm01 (mon.0) 768 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.525413+0000 mon.vm01 (mon.0) 769 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.525926+0000 mon.vm01 (mon.0) 770 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.526402+0000 mon.vm01 (mon.0) 771 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.594421+0000 mon.vm01 (mon.0) 772 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.594506+0000 mon.vm01 (mon.0) 773 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.594657+0000 mon.vm01 (mon.0) 774 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: audit 2026-04-16T19:23:11.594734+0000 mon.vm01 (mon.0) 775 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:11 vm04 bash[34817]: cluster 2026-04-16T19:23:11.598191+0000 mon.vm01 (mon.0) 776 : cluster [DBG] osdmap e33: 8 total, 8 up, 8 in
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.588132+0000 mon.vm01 (mon.0) 759 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.588307+0000 mon.vm01 (mon.0) 760 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.588458+0000 mon.vm01 (mon.0) 761 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.588583+0000 mon.vm01 (mon.0) 762 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: cluster 2026-04-16T19:23:10.593202+0000 mon.vm01 (mon.0) 763 : cluster [DBG] osdmap e32: 8 total, 8 up, 8 in
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.593877+0000 mon.vm04 (mon.1) 33 : audit [INF] from='client.? 192.168.123.104:0/1449206616' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.599727+0000 mon.vm01 (mon.0) 764 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.603458+0000 mon.vm01 (mon.0) 765 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.604829+0000 mon.vm04 (mon.1) 34 : audit [INF] from='client.? 192.168.123.104:0/1742412008' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.605785+0000 mon.vm01 (mon.0) 766 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:10.616219+0000 mon.vm01 (mon.0) 767 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.524689+0000 mon.vm01 (mon.0) 768 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.525413+0000 mon.vm01 (mon.0) 769 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.525926+0000 mon.vm01 (mon.0) 770 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.526402+0000 mon.vm01 (mon.0) 771 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.594421+0000 mon.vm01 (mon.0) 772 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.uxumrv' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.594506+0000 mon.vm01 (mon.0) 773 : audit [INF] from='client.? 192.168.123.101:0/1340363391' entity='client.rgw.foo.vm01.pktgwy' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.594657+0000 mon.vm01 (mon.0) 774 : audit [INF] from='client.? 192.168.123.101:0/331594446' entity='client.rgw.foo.vm01.qgurbb' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: audit 2026-04-16T19:23:11.594734+0000 mon.vm01 (mon.0) 775 : audit [INF] from='client.? ' entity='client.rgw.foo.vm04.rpimxa' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-16T19:23:11.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:11 vm01 bash[28222]: cluster 2026-04-16T19:23:11.598191+0000 mon.vm01 (mon.0) 776 : cluster [DBG] osdmap e33: 8 total, 8 up, 8 in
2026-04-16T19:23:12.621 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:12.942 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:12.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:12 vm04 bash[34817]: cluster 2026-04-16T19:23:11.272307+0000 mgr.vm01.nwhpas (mgr.14227) 135 : cluster [DBG] pgmap v67: 129 pgs: 13 creating+peering, 16 unknown, 100 active+clean; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 2.5 KiB/s wr, 16 op/s
2026-04-16T19:23:12.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:12 vm04 bash[34817]: audit 2026-04-16T19:23:11.523892+0000 mgr.vm01.nwhpas (mgr.14227) 136 : audit [DBG] from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:12 vm01 bash[28222]: cluster 2026-04-16T19:23:11.272307+0000 mgr.vm01.nwhpas (mgr.14227) 135 : cluster [DBG] pgmap v67: 129 pgs: 13 creating+peering, 16 unknown, 100 active+clean; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 2.5 KiB/s wr, 16 op/s
2026-04-16T19:23:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:12 vm01 bash[28222]: audit 2026-04-16T19:23:11.523892+0000 mgr.vm01.nwhpas (mgr.14227) 136 : audit [DBG] from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:13.607 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:13.607 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:09.696768Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:13.744 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:13 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:13.744 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:13 vm01 bash[28222]: audit 2026-04-16T19:23:13.599517+0000 mon.vm01 (mon.0) 777 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.744 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:13 vm01 bash[28222]: audit 2026-04-16T19:23:13.600205+0000 mon.vm01 (mon.0) 778 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.744 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:13 vm01 bash[28222]: audit 2026-04-16T19:23:13.600719+0000 mon.vm01 (mon.0) 779 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.745 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:13 vm01 bash[28222]: audit 2026-04-16T19:23:13.601232+0000 mon.vm01 (mon.0) 780 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.924 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:13.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:13 vm04 bash[34817]: audit 2026-04-16T19:23:13.599517+0000 mon.vm01 (mon.0) 777 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:13 vm04 bash[34817]: audit 2026-04-16T19:23:13.600205+0000 mon.vm01 (mon.0) 778 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:13 vm04 bash[34817]: audit 2026-04-16T19:23:13.600719+0000 mon.vm01 (mon.0) 779 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:13.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:13 vm04 bash[34817]: audit 2026-04-16T19:23:13.601232+0000 mon.vm01 (mon.0) 780 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:14.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:13 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
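The systemd warning above is self-describing: line 23 of the cephadm-generated unit sets KillMode=none, which systemd deprecates because it opts the service out of process lifecycle management, and the suggested replacements are 'mixed' or 'control-group'. Purely as an illustration of the change the warning asks for (cephadm owns and regenerates this unit, so a manual drop-in like this is a sketch of the mechanism, not a recommended fix for this cluster):

    # hypothetical drop-in, e.g.
    # /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service.d/override.conf
    [Service]
    KillMode=mixed

After creating such a drop-in one would run `systemctl daemon-reload` for it to take effect; in practice the warning is expected to go away only when the cephadm unit template itself stops using KillMode=none.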
2026-04-16T19:23:14.925 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:14.933 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:14 vm01 bash[28222]: cluster 2026-04-16T19:23:13.272747+0000 mgr.vm01.nwhpas (mgr.14227) 137 : cluster [DBG] pgmap v69: 129 pgs: 5 creating+peering, 9 unknown, 115 active+clean; 579 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 120 KiB/s rd, 3.8 KiB/s wr, 215 op/s
2026-04-16T19:23:14.933 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:14 vm01 bash[28222]: audit 2026-04-16T19:23:13.598762+0000 mgr.vm01.nwhpas (mgr.14227) 138 : audit [DBG] from='client.14626 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:14.933 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:14 vm01 bash[28222]: audit 2026-04-16T19:23:13.892650+0000 mon.vm01 (mon.0) 781 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.933 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:14 vm01 bash[28222]: audit 2026-04-16T19:23:13.897777+0000 mon.vm01 (mon.0) 782 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.933 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:14 vm01 bash[28222]: audit 2026-04-16T19:23:13.902910+0000 mon.vm01 (mon.0) 783 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.933 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:14 vm01 bash[28222]: audit 2026-04-16T19:23:13.912237+0000 mon.vm01 (mon.0) 784 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:14 vm04 bash[34817]: cluster 2026-04-16T19:23:13.272747+0000 mgr.vm01.nwhpas (mgr.14227) 137 : cluster [DBG] pgmap v69: 129 pgs: 5 creating+peering, 9 unknown, 115 active+clean; 579 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 120 KiB/s rd, 3.8 KiB/s wr, 215 op/s
2026-04-16T19:23:14.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:14 vm04 bash[34817]: audit 2026-04-16T19:23:13.598762+0000 mgr.vm01.nwhpas (mgr.14227) 138 : audit [DBG] from='client.14626 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:14.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:14 vm04 bash[34817]: audit 2026-04-16T19:23:13.892650+0000 mon.vm01 (mon.0) 781 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:14 vm04 bash[34817]: audit 2026-04-16T19:23:13.897777+0000 mon.vm01 (mon.0) 782 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:14 vm04 bash[34817]: audit 2026-04-16T19:23:13.902910+0000 mon.vm01 (mon.0) 783 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:14.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:14 vm04 bash[34817]: audit 2026-04-16T19:23:13.912237+0000 mon.vm01 (mon.0) 784 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:15.220 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:15.652 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:15.652 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:13.903419Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:15.735 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: cephadm 2026-04-16T19:23:13.912651+0000 mgr.vm01.nwhpas (mgr.14227) 139 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm04 interface ens3
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: cephadm 2026-04-16T19:23:13.912718+0000 mgr.vm01.nwhpas (mgr.14227) 140 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm01 interface ens3
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: cephadm 2026-04-16T19:23:13.915625+0000 mgr.vm01.nwhpas (mgr.14227) 141 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm04.bxakqk on vm04
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: audit 2026-04-16T19:23:15.647822+0000 mon.vm01 (mon.0) 785 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: audit 2026-04-16T19:23:15.648476+0000 mon.vm01 (mon.0) 786 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: audit 2026-04-16T19:23:15.648869+0000 mon.vm01 (mon.0) 787 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:15.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:15 vm04 bash[34817]: audit 2026-04-16T19:23:15.649237+0000 mon.vm01 (mon.0) 788 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: cephadm 2026-04-16T19:23:13.912651+0000 mgr.vm01.nwhpas (mgr.14227) 139 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm04 interface ens3
2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: cephadm 2026-04-16T19:23:13.912718+0000 mgr.vm01.nwhpas (mgr.14227) 140 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm01 interface ens3
2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: cephadm 2026-04-16T19:23:13.915625+0000 mgr.vm01.nwhpas (mgr.14227) 141 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm04.bxakqk on vm04
2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: audit 2026-04-16T19:23:15.647822+0000 mon.vm01 (mon.0) 785 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
"client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: audit 2026-04-16T19:23:15.648476+0000 mon.vm01 (mon.0) 786 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: audit 2026-04-16T19:23:15.648869+0000 mon.vm01 (mon.0) 787 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: audit 2026-04-16T19:23:15.648869+0000 mon.vm01 (mon.0) 787 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: audit 2026-04-16T19:23:15.649237+0000 mon.vm01 (mon.0) 788 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:15.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:15 vm01 bash[28222]: audit 2026-04-16T19:23:15.649237+0000 mon.vm01 (mon.0) 788 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:16.737 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:23:16.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:16 vm04 bash[34817]: cluster 2026-04-16T19:23:15.273184+0000 mgr.vm01.nwhpas (mgr.14227) 142 : cluster [DBG] pgmap v70: 129 pgs: 129 active+clean; 583 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 273 KiB/s rd, 6.2 KiB/s wr, 503 op/s 2026-04-16T19:23:16.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:16 vm04 bash[34817]: cluster 2026-04-16T19:23:15.273184+0000 mgr.vm01.nwhpas (mgr.14227) 142 : cluster [DBG] pgmap v70: 129 pgs: 129 active+clean; 583 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 273 KiB/s rd, 6.2 KiB/s wr, 503 op/s 2026-04-16T19:23:16.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:16 vm04 bash[34817]: audit 2026-04-16T19:23:15.647161+0000 mgr.vm01.nwhpas (mgr.14227) 143 : audit [DBG] from='client.14630 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:16.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:16 vm04 bash[34817]: audit 2026-04-16T19:23:15.647161+0000 mgr.vm01.nwhpas (mgr.14227) 143 : audit [DBG] from='client.14630 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:16.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:16 vm01 bash[28222]: cluster 2026-04-16T19:23:15.273184+0000 mgr.vm01.nwhpas (mgr.14227) 142 : cluster 
2026-04-16T19:23:16.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:16 vm01 bash[28222]: audit 2026-04-16T19:23:15.647161+0000 mgr.vm01.nwhpas (mgr.14227) 143 : audit [DBG] from='client.14630 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:17.029 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:17.425 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:17.425 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:13.903419Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-16T19:23:17.505 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-16T19:23:17.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:17 vm04 bash[34817]: audit 2026-04-16T19:23:17.420559+0000 mon.vm01 (mon.0) 789 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:17.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:17 vm04 bash[34817]: audit 2026-04-16T19:23:17.421448+0000 mon.vm01 (mon.0) 790 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:17.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:17 vm04 bash[34817]: audit 2026-04-16T19:23:17.422076+0000 mon.vm01 (mon.0) 791 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:17.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:17 vm04 bash[34817]: audit 2026-04-16T19:23:17.422648+0000 mon.vm01 (mon.0) 792 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:18.047 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:17 vm01 bash[28222]: audit 2026-04-16T19:23:17.420559+0000 mon.vm01 (mon.0) 789 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:18.047 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:17 vm01 bash[28222]: audit 2026-04-16T19:23:17.421448+0000 mon.vm01 (mon.0) 790 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:18.047 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:17 vm01 bash[28222]: audit 2026-04-16T19:23:17.422076+0000 mon.vm01 (mon.0) 791 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:18.047 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:17 vm01 bash[28222]: audit 2026-04-16T19:23:17.422648+0000 mon.vm01 (mon.0) 792 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:18.289 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:18.505 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:18.542 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-16T19:23:18.820 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:18.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 bash[34817]: cluster 2026-04-16T19:23:17.273685+0000 mgr.vm01.nwhpas (mgr.14227) 144 : cluster [DBG] pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 328 KiB/s rd, 8.5 KiB/s wr, 608 op/s
2026-04-16T19:23:18.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 bash[34817]: audit 2026-04-16T19:23:17.419865+0000 mgr.vm01.nwhpas (mgr.14227) 145 : audit [DBG] from='client.14634 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:18.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 bash[34817]: audit 2026-04-16T19:23:18.576350+0000 mon.vm01 (mon.0) 793 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:18.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 bash[34817]: audit 2026-04-16T19:23:18.582065+0000 mon.vm01 (mon.0) 794 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:18.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:18 vm04 bash[34817]: audit 2026-04-16T19:23:18.590909+0000 mon.vm01 (mon.0) 795 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:19.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:18 vm01 bash[28222]: cluster 2026-04-16T19:23:17.273685+0000 mgr.vm01.nwhpas (mgr.14227) 144 : cluster [DBG] pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 328 KiB/s rd, 8.5 KiB/s wr, 608 op/s
2026-04-16T19:23:19.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:18 vm01 bash[28222]: audit 2026-04-16T19:23:17.419865+0000 mgr.vm01.nwhpas (mgr.14227) 145 : audit [DBG] from='client.14634 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:19.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:18 vm01 bash[28222]: audit 2026-04-16T19:23:18.576350+0000 mon.vm01 (mon.0) 793 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:19.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:18 vm01 bash[28222]: audit 2026-04-16T19:23:18.582065+0000 mon.vm01 (mon.0) 794 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:19.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:18 vm01 bash[28222]: audit 2026-04-16T19:23:18.590909+0000 mon.vm01 (mon.0) 795 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:19.254 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-16T19:23:19.254 
INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:18.591166Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": 
["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-16T19:23:19.322 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:23:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: cephadm 2026-04-16T19:23:18.592069+0000 mgr.vm01.nwhpas (mgr.14227) 146 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm01 interface ens3 2026-04-16T19:23:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: cephadm 2026-04-16T19:23:18.592069+0000 mgr.vm01.nwhpas (mgr.14227) 146 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm01 interface ens3 2026-04-16T19:23:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: cephadm 2026-04-16T19:23:18.592112+0000 mgr.vm01.nwhpas (mgr.14227) 147 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm04 interface ens3 2026-04-16T19:23:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: cephadm 2026-04-16T19:23:18.592112+0000 mgr.vm01.nwhpas (mgr.14227) 147 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm04 interface ens3 2026-04-16T19:23:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: cephadm 2026-04-16T19:23:18.592461+0000 mgr.vm01.nwhpas (mgr.14227) 148 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm01.mbrpom on vm01 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: cephadm 2026-04-16T19:23:18.592461+0000 mgr.vm01.nwhpas (mgr.14227) 148 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm01.mbrpom on vm01 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.249709+0000 mon.vm01 (mon.0) 796 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.249709+0000 mon.vm01 (mon.0) 796 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.250508+0000 mon.vm01 (mon.0) 797 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.250508+0000 mon.vm01 (mon.0) 797 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.251019+0000 mon.vm01 (mon.0) 798 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": 
"client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.251019+0000 mon.vm01 (mon.0) 798 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.251484+0000 mon.vm01 (mon.0) 799 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:19 vm04 bash[34817]: audit 2026-04-16T19:23:19.251484+0000 mon.vm01 (mon.0) 799 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: cephadm 2026-04-16T19:23:18.592069+0000 mgr.vm01.nwhpas (mgr.14227) 146 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm01 interface ens3 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: cephadm 2026-04-16T19:23:18.592069+0000 mgr.vm01.nwhpas (mgr.14227) 146 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm01 interface ens3 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: cephadm 2026-04-16T19:23:18.592112+0000 mgr.vm01.nwhpas (mgr.14227) 147 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm04 interface ens3 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: cephadm 2026-04-16T19:23:18.592112+0000 mgr.vm01.nwhpas (mgr.14227) 147 : cephadm [INF] 12.12.1.101 is in 12.12.0.0/22 on vm04 interface ens3 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: cephadm 2026-04-16T19:23:18.592461+0000 mgr.vm01.nwhpas (mgr.14227) 148 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm01.mbrpom on vm01 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: cephadm 2026-04-16T19:23:18.592461+0000 mgr.vm01.nwhpas (mgr.14227) 148 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm01.mbrpom on vm01 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.249709+0000 mon.vm01 (mon.0) 796 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.249709+0000 mon.vm01 (mon.0) 796 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.250508+0000 mon.vm01 (mon.0) 797 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": 
"client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.250508+0000 mon.vm01 (mon.0) 797 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.251019+0000 mon.vm01 (mon.0) 798 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.251019+0000 mon.vm01 (mon.0) 798 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.251484+0000 mon.vm01 (mon.0) 799 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:19 vm01 bash[28222]: audit 2026-04-16T19:23:19.251484+0000 mon.vm01 (mon.0) 799 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:20.323 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:23:20.625 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:23:20.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:20 vm01 bash[28222]: audit 2026-04-16T19:23:19.248878+0000 mgr.vm01.nwhpas (mgr.14227) 149 : audit [DBG] from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:20.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:20 vm01 bash[28222]: audit 2026-04-16T19:23:19.248878+0000 mgr.vm01.nwhpas (mgr.14227) 149 : audit [DBG] from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:20.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:20 vm01 bash[28222]: cluster 2026-04-16T19:23:19.274102+0000 mgr.vm01.nwhpas (mgr.14227) 150 : cluster [DBG] pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 291 KiB/s rd, 7.5 KiB/s wr, 538 op/s 2026-04-16T19:23:20.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:20 vm01 bash[28222]: cluster 2026-04-16T19:23:19.274102+0000 mgr.vm01.nwhpas (mgr.14227) 150 : cluster [DBG] pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 291 KiB/s rd, 7.5 KiB/s wr, 538 op/s 2026-04-16T19:23:21.068 
INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:21.069 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:18.591166Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": 
"2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-16T19:23:21.196 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:23:21.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:20 vm04 bash[34817]: audit 2026-04-16T19:23:19.248878+0000 mgr.vm01.nwhpas (mgr.14227) 149 : audit [DBG] from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:21.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:20 vm04 bash[34817]: audit 2026-04-16T19:23:19.248878+0000 mgr.vm01.nwhpas (mgr.14227) 149 : audit [DBG] from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:21.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:20 vm04 bash[34817]: cluster 2026-04-16T19:23:19.274102+0000 mgr.vm01.nwhpas (mgr.14227) 150 : cluster [DBG] pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 291 KiB/s rd, 7.5 KiB/s wr, 538 op/s 2026-04-16T19:23:21.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:20 vm04 bash[34817]: cluster 2026-04-16T19:23:19.274102+0000 mgr.vm01.nwhpas (mgr.14227) 150 : cluster [DBG] pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 291 KiB/s rd, 7.5 KiB/s wr, 538 op/s 2026-04-16T19:23:22.196 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.064226+0000 mon.vm01 (mon.0) 800 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.064226+0000 mon.vm01 (mon.0) 800 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.064900+0000 mon.vm01 (mon.0) 801 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.064900+0000 mon.vm01 (mon.0) 801 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 
vm01 bash[28222]: audit 2026-04-16T19:23:21.065401+0000 mon.vm01 (mon.0) 802 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.065401+0000 mon.vm01 (mon.0) 802 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.065984+0000 mon.vm01 (mon.0) 803 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.204 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:21 vm01 bash[28222]: audit 2026-04-16T19:23:21.065984+0000 mon.vm01 (mon.0) 803 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.064226+0000 mon.vm01 (mon.0) 800 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.064226+0000 mon.vm01 (mon.0) 800 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.064900+0000 mon.vm01 (mon.0) 801 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.064900+0000 mon.vm01 (mon.0) 801 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.065401+0000 mon.vm01 (mon.0) 802 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.065401+0000 mon.vm01 (mon.0) 802 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.065984+0000 mon.vm01 (mon.0) 803 : audit [DBG] 
from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:21 vm04 bash[34817]: audit 2026-04-16T19:23:21.065984+0000 mon.vm01 (mon.0) 803 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:22.470 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 bash[28222]: audit 2026-04-16T19:23:21.063561+0000 mgr.vm01.nwhpas (mgr.14227) 151 : audit [DBG] from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 bash[28222]: audit 2026-04-16T19:23:21.063561+0000 mgr.vm01.nwhpas (mgr.14227) 151 : audit [DBG] from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 bash[28222]: cluster 2026-04-16T19:23:21.274680+0000 mgr.vm01.nwhpas (mgr.14227) 152 : cluster [DBG] pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 252 KiB/s rd, 6.5 KiB/s wr, 467 op/s 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 bash[28222]: cluster 2026-04-16T19:23:21.274680+0000 mgr.vm01.nwhpas (mgr.14227) 152 : cluster [DBG] pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 252 KiB/s rd, 6.5 KiB/s wr, 467 op/s 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 bash[28222]: audit 2026-04-16T19:23:22.560275+0000 mon.vm01 (mon.0) 804 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 bash[28222]: audit 2026-04-16T19:23:22.560275+0000 mon.vm01 (mon.0) 804 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:23:22.880 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:22 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
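[editorial aside] The recurring systemd complaint about KillMode=none comes from line 23 of cephadm's own unit template, which, to the best of my understanding, uses that mode deliberately so that the container runtime rather than systemd tears the daemon down via the unit's stop command; in this run it is noise, not a failure. For reference only, a drop-in override is the usual way to change a unit's KillMode without editing the shipped file. The sketch below is illustrative (paths hard-coded to this run's fsid) and changing cephadm's units this way is not recommended:

import pathlib
import subprocess

# Hypothetical example: write a systemd drop-in that overrides KillMode for
# the template unit named in the warning above, then reload systemd.
# Do NOT apply this to a real cephadm cluster without understanding why
# cephadm sets KillMode=none in the first place.
unit = "ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service"
dropin_dir = pathlib.Path(f"/etc/systemd/system/{unit}.d")
dropin_dir.mkdir(parents=True, exist_ok=True)
(dropin_dir / "10-killmode.conf").write_text("[Service]\nKillMode=mixed\n")
subprocess.run(["systemctl", "daemon-reload"], check=True)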
2026-04-16T19:23:23.001 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:23.001 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:18.591166Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": 
"2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-16T19:23:23.130 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 systemd[1]: /etc/systemd/system/ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-16T19:23:23.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:22 vm04 bash[34817]: audit 2026-04-16T19:23:21.063561+0000 mgr.vm01.nwhpas (mgr.14227) 151 : audit [DBG] from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:23.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:22 vm04 bash[34817]: audit 2026-04-16T19:23:21.063561+0000 mgr.vm01.nwhpas (mgr.14227) 151 : audit [DBG] from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:23.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:22 vm04 bash[34817]: cluster 2026-04-16T19:23:21.274680+0000 mgr.vm01.nwhpas (mgr.14227) 152 : cluster [DBG] pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 252 KiB/s rd, 6.5 KiB/s wr, 467 op/s 2026-04-16T19:23:23.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:22 vm04 bash[34817]: cluster 2026-04-16T19:23:21.274680+0000 mgr.vm01.nwhpas (mgr.14227) 152 : cluster [DBG] pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 252 KiB/s rd, 6.5 KiB/s wr, 467 op/s 2026-04-16T19:23:23.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:22 vm04 bash[34817]: audit 2026-04-16T19:23:22.560275+0000 mon.vm01 (mon.0) 804 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:23:23.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:22 vm04 bash[34817]: audit 2026-04-16T19:23:22.560275+0000 mon.vm01 (mon.0) 804 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:23:23.314 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:23:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:22.996750+0000 mon.vm01 (mon.0) 805 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:22.996750+0000 mon.vm01 (mon.0) 805 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 
2026-04-16T19:23:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:22.997465+0000 mon.vm01 (mon.0) 806 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:22.997897+0000 mon.vm01 (mon.0) 807 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:22.998271+0000 mon.vm01 (mon.0) 808 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:23.262979+0000 mon.vm01 (mon.0) 809 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:23.268996+0000 mon.vm01 (mon.0) 810 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:23.274104+0000 mon.vm01 (mon.0) 811 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:23.280920+0000 mon.vm01 (mon.0) 812 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:23 vm04 bash[34817]: audit 2026-04-16T19:23:23.297312+0000 mon.vm01 (mon.0) 813 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:22.996750+0000 mon.vm01 (mon.0) 805 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:22.997465+0000 mon.vm01 (mon.0) 806 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:22.997897+0000 mon.vm01 (mon.0) 807 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:22.998271+0000 mon.vm01 (mon.0) 808 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:23.262979+0000 mon.vm01 (mon.0) 809 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:23.268996+0000 mon.vm01 (mon.0) 810 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:23.274104+0000 mon.vm01 (mon.0) 811 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:23.280920+0000 mon.vm01 (mon.0) 812 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:23 vm01 bash[28222]: audit 2026-04-16T19:23:23.297312+0000 mon.vm01 (mon.0) 813 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:23:24.315 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:24.584 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:24.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:24 vm01 bash[28222]: audit 2026-04-16T19:23:22.995979+0000 mgr.vm01.nwhpas (mgr.14227) 153 : audit [DBG] from='client.14646
-' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:24.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:24 vm01 bash[28222]: audit 2026-04-16T19:23:22.995979+0000 mgr.vm01.nwhpas (mgr.14227) 153 : audit [DBG] from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:24.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:24 vm01 bash[28222]: cluster 2026-04-16T19:23:23.275028+0000 mgr.vm01.nwhpas (mgr.14227) 154 : cluster [DBG] pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 216 KiB/s rd, 5.6 KiB/s wr, 400 op/s 2026-04-16T19:23:24.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:24 vm01 bash[28222]: cluster 2026-04-16T19:23:23.275028+0000 mgr.vm01.nwhpas (mgr.14227) 154 : cluster [DBG] pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 216 KiB/s rd, 5.6 KiB/s wr, 400 op/s 2026-04-16T19:23:24.988 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:24.988 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:23.281142Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": 
["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-16T19:23:25.059 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:23:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:24 vm04 bash[34817]: audit 2026-04-16T19:23:22.995979+0000 mgr.vm01.nwhpas (mgr.14227) 153 : audit [DBG] from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:24 vm04 bash[34817]: audit 2026-04-16T19:23:22.995979+0000 mgr.vm01.nwhpas (mgr.14227) 153 : audit [DBG] from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:24 vm04 bash[34817]: cluster 2026-04-16T19:23:23.275028+0000 mgr.vm01.nwhpas (mgr.14227) 154 : cluster [DBG] pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 216 KiB/s rd, 5.6 KiB/s wr, 400 op/s 2026-04-16T19:23:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:24 vm04 bash[34817]: cluster 2026-04-16T19:23:23.275028+0000 mgr.vm01.nwhpas (mgr.14227) 154 : cluster [DBG] pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 216 KiB/s rd, 5.6 KiB/s wr, 400 op/s 2026-04-16T19:23:26.060 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:23:26.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.899973+0000 mon.vm01 (mon.0) 814 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:26.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.899973+0000 mon.vm01 (mon.0) 814 : audit 
[INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:26.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.983261+0000 mgr.vm01.nwhpas (mgr.14227) 155 : audit [DBG] from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:26.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.983899+0000 mon.vm01 (mon.0) 815 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:26.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.984523+0000 mon.vm01 (mon.0) 816 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:26.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.984994+0000 mon.vm01 (mon.0) 817 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:26.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: audit 2026-04-16T19:23:24.985685+0000 mon.vm01 (mon.0) 818 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:26.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:25 vm04 bash[34817]: cluster 2026-04-16T19:23:25.275492+0000 mgr.vm01.nwhpas (mgr.14227) 156 : cluster [DBG] pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 4.7 KiB/s wr, 310 op/s
2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.899973+0000 mon.vm01 (mon.0) 814 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.983261+0000 mgr.vm01.nwhpas (mgr.14227) 155 : audit [DBG] from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.983899+0000 mon.vm01 (mon.0) 815 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.984523+0000 mon.vm01 (mon.0) 816 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.984994+0000 mon.vm01 (mon.0) 817 : audit
[DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.985685+0000 mon.vm01 (mon.0) 818 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: audit 2026-04-16T19:23:24.985685+0000 mon.vm01 (mon.0) 818 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: cluster 2026-04-16T19:23:25.275492+0000 mgr.vm01.nwhpas (mgr.14227) 156 : cluster [DBG] pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 4.7 KiB/s wr, 310 op/s 2026-04-16T19:23:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:25 vm01 bash[28222]: cluster 2026-04-16T19:23:25.275492+0000 mgr.vm01.nwhpas (mgr.14227) 156 : cluster [DBG] pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 168 KiB/s rd, 4.7 KiB/s wr, 310 op/s 2026-04-16T19:23:26.330 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:23:26.714 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:26.714 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.180962Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.181143Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:23.281142Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr 
[INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.181080Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.181021Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.180875Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.180929Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-16T19:23:26.790 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-16T19:23:27.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.708517+0000 mgr.vm01.nwhpas (mgr.14227) 157 : audit [DBG] from='client.14654 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:27.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.708517+0000 mgr.vm01.nwhpas (mgr.14227) 157 : audit [DBG] from='client.14654 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:27.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.709196+0000 mon.vm01 (mon.0) 819 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:27.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.709196+0000 mon.vm01 (mon.0) 819 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:27.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 
19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.710025+0000 mon.vm01 (mon.0) 820 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.710658+0000 mon.vm01 (mon.0) 821 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:26 vm04 bash[34817]: audit 2026-04-16T19:23:26.711254+0000 mon.vm01 (mon.0) 822 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:26 vm01 bash[28222]: audit 2026-04-16T19:23:26.708517+0000 mgr.vm01.nwhpas (mgr.14227) 157 : audit [DBG] from='client.14654 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-16T19:23:27.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:26 vm01 bash[28222]: audit 2026-04-16T19:23:26.709196+0000 mon.vm01 (mon.0) 819 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:26 vm01 bash[28222]: audit 2026-04-16T19:23:26.710025+0000 mon.vm01 (mon.0) 820 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:26 vm01 bash[28222]: audit 2026-04-16T19:23:26.710658+0000 mon.vm01 (mon.0) 821 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:26 vm01 bash[28222]: audit 2026-04-16T19:23:26.711254+0000 mon.vm01 (mon.0) 822 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:23:27.791 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json
2026-04-16T19:23:28.083 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:23:28.108 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:27 vm01 bash[28222]: cluster 2026-04-16T19:23:27.275942+0000 mgr.vm01.nwhpas (mgr.14227) 158 : cluster [DBG] pgmap v76: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 74 KiB/s rd, 2.3 KiB/s wr, 138 op/s
2026-04-16T19:23:28.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:27 vm04 bash[34817]: cluster 2026-04-16T19:23:27.275942+0000 mgr.vm01.nwhpas (mgr.14227) 158 : cluster [DBG] pgmap v76: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 74
KiB/s rd, 2.3 KiB/s wr, 138 op/s 2026-04-16T19:23:28.522 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:28.523 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:22:58.785406Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:22:58.785314Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:22:58.785282Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:22:58.785454Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:23.281142Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "last_refresh": "2026-04-16T19:23:28.146517Z", "ports": [9000, 9001], "running": 2, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:22:58.785376Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:22:58.785245Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:22:58.785519Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:22:58.785148Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": 
"prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "last_refresh": "2026-04-16T19:22:58.785551Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "last_refresh": "2026-04-16T19:23:28.146551Z", "ports": [8000, 8001], "running": 2, "size": 4}}] 2026-04-16T19:23:28.598 INFO:tasks.cephadm:rgw.foo has 2/4 2026-04-16T19:23:29.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.154060+0000 mon.vm01 (mon.0) 823 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.154060+0000 mon.vm01 (mon.0) 823 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.161228+0000 mon.vm01 (mon.0) 824 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.161228+0000 mon.vm01 (mon.0) 824 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.513532+0000 mgr.vm01.nwhpas (mgr.14227) 159 : audit [DBG] from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.513532+0000 mgr.vm01.nwhpas (mgr.14227) 159 : audit [DBG] from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.514196+0000 mon.vm01 (mon.0) 825 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.514196+0000 mon.vm01 (mon.0) 825 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.514907+0000 mon.vm01 (mon.0) 826 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.514907+0000 mon.vm01 (mon.0) 826 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", 
"who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.515440+0000 mon.vm01 (mon.0) 827 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.515440+0000 mon.vm01 (mon.0) 827 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.515949+0000 mon.vm01 (mon.0) 828 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.515949+0000 mon.vm01 (mon.0) 828 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.779210+0000 mon.vm01 (mon.0) 829 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.779210+0000 mon.vm01 (mon.0) 829 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.786727+0000 mon.vm01 (mon.0) 830 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.786727+0000 mon.vm01 (mon.0) 830 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.787772+0000 mon.vm01 (mon.0) 831 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.787772+0000 mon.vm01 (mon.0) 831 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.788345+0000 mon.vm01 (mon.0) 832 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:28.788345+0000 mon.vm01 (mon.0) 832 : audit [INF] from='mgr.14227 
192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: cephadm 2026-04-16T19:23:28.791076+0000 mgr.vm01.nwhpas (mgr.14227) 160 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: cephadm 2026-04-16T19:23:28.791076+0000 mgr.vm01.nwhpas (mgr.14227) 160 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:29.151605+0000 mon.vm01 (mon.0) 833 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:29 vm04 bash[34817]: audit 2026-04-16T19:23:29.151605+0000 mon.vm01 (mon.0) 833 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.154060+0000 mon.vm01 (mon.0) 823 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.154060+0000 mon.vm01 (mon.0) 823 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.161228+0000 mon.vm01 (mon.0) 824 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.161228+0000 mon.vm01 (mon.0) 824 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.513532+0000 mgr.vm01.nwhpas (mgr.14227) 159 : audit [DBG] from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.513532+0000 mgr.vm01.nwhpas (mgr.14227) 159 : audit [DBG] from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:29.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.514196+0000 mon.vm01 (mon.0) 825 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.514196+0000 mon.vm01 (mon.0) 825 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.514907+0000 mon.vm01 (mon.0) 826 : audit [DBG] from='mgr.14227 
192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.514907+0000 mon.vm01 (mon.0) 826 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.515440+0000 mon.vm01 (mon.0) 827 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.515440+0000 mon.vm01 (mon.0) 827 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.515949+0000 mon.vm01 (mon.0) 828 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.515949+0000 mon.vm01 (mon.0) 828 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.779210+0000 mon.vm01 (mon.0) 829 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.779210+0000 mon.vm01 (mon.0) 829 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.786727+0000 mon.vm01 (mon.0) 830 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.786727+0000 mon.vm01 (mon.0) 830 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.787772+0000 mon.vm01 (mon.0) 831 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.787772+0000 mon.vm01 (mon.0) 831 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 
bash[28222]: audit 2026-04-16T19:23:28.788345+0000 mon.vm01 (mon.0) 832 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:28.788345+0000 mon.vm01 (mon.0) 832 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: cephadm 2026-04-16T19:23:28.791076+0000 mgr.vm01.nwhpas (mgr.14227) 160 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: cephadm 2026-04-16T19:23:28.791076+0000 mgr.vm01.nwhpas (mgr.14227) 160 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:29.151605+0000 mon.vm01 (mon.0) 833 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:29 vm01 bash[28222]: audit 2026-04-16T19:23:29.151605+0000 mon.vm01 (mon.0) 833 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:29.598 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:23:29.892 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:23:30.376 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:30.376 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:23:28.771678Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:23:28.146602Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:23:28.146852Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:23:28.771712Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:23.281142Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", 
"first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "last_refresh": "2026-04-16T19:23:28.146517Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:23:28.146754Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:23:28.146693Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:23:28.146406Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:23:28.146465Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "ports": [9095], "running": 0, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "last_refresh": "2026-04-16T19:23:28.146551Z", "ports": [8000, 8001], "running": 4, "size": 4}}] 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.161792+0000 mon.vm01 (mon.0) 834 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.161792+0000 mon.vm01 (mon.0) 834 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.167312+0000 mon.vm01 (mon.0) 835 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.167312+0000 mon.vm01 (mon.0) 835 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 
2026-04-16T19:23:29.170206+0000 mon.vm01 (mon.0) 836 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.170206+0000 mon.vm01 (mon.0) 836 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: cephadm 2026-04-16T19:23:29.187913+0000 mgr.vm01.nwhpas (mgr.14227) 161 : cephadm [INF] Reconfiguring prometheus.vm01 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'ingress', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: cephadm 2026-04-16T19:23:29.187913+0000 mgr.vm01.nwhpas (mgr.14227) 161 : cephadm [INF] Reconfiguring prometheus.vm01 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'ingress', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-16T19:23:30.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: cluster 2026-04-16T19:23:29.276616+0000 mgr.vm01.nwhpas (mgr.14227) 162 : cluster [DBG] pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: cluster 2026-04-16T19:23:29.276616+0000 mgr.vm01.nwhpas (mgr.14227) 162 : cluster [DBG] pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: cephadm 2026-04-16T19:23:29.344582+0000 mgr.vm01.nwhpas (mgr.14227) 163 : cephadm [INF] Reconfiguring daemon prometheus.vm01 on vm01 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: cephadm 2026-04-16T19:23:29.344582+0000 mgr.vm01.nwhpas (mgr.14227) 163 : cephadm [INF] Reconfiguring daemon prometheus.vm01 on vm01 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.594047+0000 mon.vm01 (mon.0) 837 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.594047+0000 mon.vm01 (mon.0) 837 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.594697+0000 mgr.vm01.nwhpas (mgr.14227) 164 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.594697+0000 mgr.vm01.nwhpas (mgr.14227) 164 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.708397+0000 mon.vm01 (mon.0) 838 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.708397+0000 mon.vm01 (mon.0) 838 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.708962+0000 mgr.vm01.nwhpas (mgr.14227) 165 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:29.708962+0000 mgr.vm01.nwhpas (mgr.14227) 165 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.098345+0000 mon.vm01 (mon.0) 839 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.098345+0000 mon.vm01 (mon.0) 839 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.105170+0000 mon.vm01 (mon.0) 840 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.105170+0000 mon.vm01 (mon.0) 840 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.113272+0000 mon.vm01 (mon.0) 841 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.113272+0000 mon.vm01 (mon.0) 841 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.119158+0000 mon.vm01 (mon.0) 842 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.119158+0000 mon.vm01 (mon.0) 842 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.125835+0000 mon.vm01 
(mon.0) 843 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.125835+0000 mon.vm01 (mon.0) 843 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.132099+0000 mon.vm01 (mon.0) 844 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.132099+0000 mon.vm01 (mon.0) 844 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.140411+0000 mon.vm01 (mon.0) 845 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.140411+0000 mon.vm01 (mon.0) 845 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.146296+0000 mon.vm01 (mon.0) 846 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.146296+0000 mon.vm01 (mon.0) 846 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.151983+0000 mon.vm01 (mon.0) 847 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:30 vm01 bash[28222]: audit 2026-04-16T19:23:30.151983+0000 mon.vm01 (mon.0) 847 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.451 INFO:tasks.cephadm:rgw.foo has 4/4 2026-04-16T19:23:30.451 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-16T19:23:30.453 INFO:tasks.cephadm:Waiting for ceph service ingress.rgw.foo to start (timeout 300)... 
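[editor's sketch] The cephadm.wait_for_service task that starts here repeats the same `ceph orch ls -f json` call shown above, comparing each service's status.running against status.size until they match or the 300 s timeout expires (the task itself does this parsing in Python inside teuthology). A minimal shell sketch of that loop — the wait_for_service helper name and the use of jq are illustrative assumptions, and it presumes the `ceph` CLI is reachable as in the `cephadm shell` invocations above:

  # Poll `ceph orch ls -f json` until <svc> reports running == size (max 300 s).
  wait_for_service() {
    local svc=$1 deadline=$((SECONDS + 300))
    while (( SECONDS < deadline )); do
      local state
      # Same JSON document as the `ceph orch ls -f json` output logged above.
      state=$(ceph orch ls -f json |
              jq -r --arg s "$svc" '.[] | select(.service_name == $s)
                                         | "\(.status.running)/\(.status.size)"')
      echo "$svc has $state"
      [[ -n $state && ${state%%/*} -eq ${state##*/} ]] && return 0
      sleep 5
    done
    echo "timed out waiting for $svc" >&2
    return 1
  }
  wait_for_service ingress.rgw.foo

In this run both checks pass on the first poll: "rgw.foo has 4/4" above, and "ingress.rgw.foo has 4/4" about a second later.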
2026-04-16T19:23:30.454 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph orch ls -f json 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.161792+0000 mon.vm01 (mon.0) 834 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.161792+0000 mon.vm01 (mon.0) 834 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.167312+0000 mon.vm01 (mon.0) 835 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.167312+0000 mon.vm01 (mon.0) 835 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.170206+0000 mon.vm01 (mon.0) 836 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.170206+0000 mon.vm01 (mon.0) 836 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: cephadm 2026-04-16T19:23:29.187913+0000 mgr.vm01.nwhpas (mgr.14227) 161 : cephadm [INF] Reconfiguring prometheus.vm01 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'ingress', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: cephadm 2026-04-16T19:23:29.187913+0000 mgr.vm01.nwhpas (mgr.14227) 161 : cephadm [INF] Reconfiguring prometheus.vm01 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm01', 'ceph-exporter.vm04', 'ingress', 'mgr.vm01.nwhpas', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: cluster 2026-04-16T19:23:29.276616+0000 mgr.vm01.nwhpas (mgr.14227) 162 : cluster [DBG] pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: cluster 2026-04-16T19:23:29.276616+0000 mgr.vm01.nwhpas (mgr.14227) 162 : cluster [DBG] pgmap v77: 129 pgs: 129 active+clean; 587 KiB 
data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: cephadm 2026-04-16T19:23:29.344582+0000 mgr.vm01.nwhpas (mgr.14227) 163 : cephadm [INF] Reconfiguring daemon prometheus.vm01 on vm01 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: cephadm 2026-04-16T19:23:29.344582+0000 mgr.vm01.nwhpas (mgr.14227) 163 : cephadm [INF] Reconfiguring daemon prometheus.vm01 on vm01 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.594047+0000 mon.vm01 (mon.0) 837 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.594047+0000 mon.vm01 (mon.0) 837 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.594697+0000 mgr.vm01.nwhpas (mgr.14227) 164 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.594697+0000 mgr.vm01.nwhpas (mgr.14227) 164 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.708397+0000 mon.vm01 (mon.0) 838 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.708397+0000 mon.vm01 (mon.0) 838 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.708962+0000 mgr.vm01.nwhpas (mgr.14227) 165 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:29.708962+0000 mgr.vm01.nwhpas (mgr.14227) 165 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.098345+0000 mon.vm01 (mon.0) 839 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.098345+0000 mon.vm01 (mon.0) 839 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.105170+0000 mon.vm01 (mon.0) 840 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.105170+0000 mon.vm01 (mon.0) 840 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.113272+0000 mon.vm01 (mon.0) 841 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.113272+0000 mon.vm01 (mon.0) 841 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.119158+0000 mon.vm01 (mon.0) 842 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.119158+0000 mon.vm01 (mon.0) 842 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.125835+0000 mon.vm01 (mon.0) 843 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.125835+0000 mon.vm01 (mon.0) 843 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.132099+0000 mon.vm01 (mon.0) 844 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.132099+0000 mon.vm01 (mon.0) 844 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.140411+0000 mon.vm01 (mon.0) 845 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.140411+0000 mon.vm01 (mon.0) 845 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 
vm04 bash[34817]: audit 2026-04-16T19:23:30.146296+0000 mon.vm01 (mon.0) 846 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.146296+0000 mon.vm01 (mon.0) 846 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.151983+0000 mon.vm01 (mon.0) 847 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:30 vm04 bash[34817]: audit 2026-04-16T19:23:30.151983+0000 mon.vm01 (mon.0) 847 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:30.741 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:23:31.134 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-16T19:23:31.135 INFO:teuthology.orchestra.run.vm01.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-16T19:20:42.403852Z", "last_refresh": "2026-04-16T19:23:28.771678Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:21:45.563821Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-16T19:20:40.705893Z", "last_refresh": "2026-04-16T19:23:28.146602Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:46.460933Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-16T19:20:40.274506Z", "last_refresh": "2026-04-16T19:23:28.146852Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-16T19:20:41.561235Z", "last_refresh": "2026-04-16T19:23:28.771712Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-16T19:23:23.281142Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.101/22"}, "status": {"created": "2026-04-16T19:22:54.410225Z", "last_refresh": "2026-04-16T19:23:28.146517Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.101/22"}}, {"events": ["2026-04-16T19:21:48.290997Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-16T19:20:39.797005Z", "last_refresh": "2026-04-16T19:23:28.146754Z", "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:49.549174Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm01:192.168.123.101=vm01", "vm04:192.168.123.104=vm04"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-16T19:21:08.138312Z", "last_refresh": "2026-04-16T19:23:28.146693Z", 
"running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:47.310077Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-16T19:20:41.997024Z", "last_refresh": "2026-04-16T19:23:28.146406Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-16T19:21:58.753924Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-16T19:21:58.745310Z", "last_refresh": "2026-04-16T19:23:28.146465Z", "running": 8, "size": 8}}, {"events": ["2026-04-16T19:21:49.552771Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-16T19:20:41.137418Z", "ports": [9095], "running": 0, "size": 1}}, {"events": ["2026-04-16T19:23:05.492069Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-16T19:23:05.480486Z", "last_refresh": "2026-04-16T19:23:28.146551Z", "ports": [8000, 8001], "running": 4, "size": 4}}] 2026-04-16T19:23:31.207 INFO:tasks.cephadm:ingress.rgw.foo has 4/4 2026-04-16T19:23:31.207 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-16T19:23:31.210 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'echo "Check while healthy..." 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> curl http://12.12.1.101:9000/ 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> # stop each rgw in turn 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> echo "Check with each rgw stopped in turn..." 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> for rgw in `ceph orch ps | grep ^rgw.foo. | awk '"'"'{print $1}'"'"'`; do 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> ceph orch daemon stop $rgw 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo '"'"'Waiting for $rgw to stop'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! curl http://12.12.1.101:9000/ ; do echo '"'"'Waiting for http://12.12.1.101:9000/ to be available'"'"'; sleep 1 ; done" 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> ceph orch daemon start $rgw 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! 
ceph orch ps | grep $rgw | grep running; do echo '"'"'Waiting for $rgw to start'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> done 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> # stop each haproxy in turn 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> echo "Check with each haproxy down in turn..." 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '"'"'{print $1}'"'"'`; do 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> ceph orch daemon stop $haproxy 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo '"'"'Waiting for $haproxy to stop'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done" 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! curl http://12.12.1.101:9000/ ; do echo '"'"'Waiting for http://12.12.1.101:9000/ to be available'"'"'; sleep 1 ; done" 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> ceph orch daemon start $haproxy 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo '"'"'Waiting for $haproxy to start'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done" 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> done 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> 2026-04-16T19:23:31.210 DEBUG:teuthology.orchestra.run.vm01:> timeout 300 bash -c "while ! curl http://12.12.1.101:9000/ ; do echo '"'"'Waiting for http://12.12.1.101:9000/ to be available'"'"'; sleep 1 ; done"' 2026-04-16T19:23:31.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.162381+0000 mon.vm01 (mon.0) 848 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:31.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.162381+0000 mon.vm01 (mon.0) 848 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.165885+0000 mon.vm01 (mon.0) 849 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.165885+0000 mon.vm01 (mon.0) 849 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.370217+0000 mgr.vm01.nwhpas (mgr.14227) 166 : audit [DBG] from='client.24447 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.370217+0000 mgr.vm01.nwhpas (mgr.14227) 166 : audit [DBG] from='client.24447 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": 
["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.371003+0000 mon.vm01 (mon.0) 850 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.371003+0000 mon.vm01 (mon.0) 850 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.371782+0000 mon.vm01 (mon.0) 851 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.371782+0000 mon.vm01 (mon.0) 851 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.372318+0000 mon.vm01 (mon.0) 852 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.372318+0000 mon.vm01 (mon.0) 852 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.372773+0000 mon.vm01 (mon.0) 853 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:30.372773+0000 mon.vm01 (mon.0) 853 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.130094+0000 mon.vm01 (mon.0) 854 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.130094+0000 mon.vm01 (mon.0) 854 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.130870+0000 mon.vm01 (mon.0) 855 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.130870+0000 mon.vm01 (mon.0) 855 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.131401+0000 mon.vm01 (mon.0) 856 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.131401+0000 mon.vm01 (mon.0) 856 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.131865+0000 mon.vm01 (mon.0) 857 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:31 vm04 bash[34817]: audit 2026-04-16T19:23:31.131865+0000 mon.vm01 (mon.0) 857 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.162381+0000 mon.vm01 (mon.0) 848 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.162381+0000 mon.vm01 (mon.0) 848 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.165885+0000 mon.vm01 (mon.0) 849 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.165885+0000 mon.vm01 (mon.0) 849 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.370217+0000 mgr.vm01.nwhpas (mgr.14227) 166 : audit [DBG] from='client.24447 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 
bash[28222]: audit 2026-04-16T19:23:30.370217+0000 mgr.vm01.nwhpas (mgr.14227) 166 : audit [DBG] from='client.24447 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.371003+0000 mon.vm01 (mon.0) 850 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.371003+0000 mon.vm01 (mon.0) 850 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.371782+0000 mon.vm01 (mon.0) 851 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.371782+0000 mon.vm01 (mon.0) 851 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.372318+0000 mon.vm01 (mon.0) 852 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.372318+0000 mon.vm01 (mon.0) 852 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.372773+0000 mon.vm01 (mon.0) 853 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:30.372773+0000 mon.vm01 (mon.0) 853 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.130094+0000 mon.vm01 (mon.0) 854 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.130094+0000 mon.vm01 (mon.0) 854 : audit [DBG] from='mgr.14227 
192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.130870+0000 mon.vm01 (mon.0) 855 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.130870+0000 mon.vm01 (mon.0) 855 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.131401+0000 mon.vm01 (mon.0) 856 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.131401+0000 mon.vm01 (mon.0) 856 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.131865+0000 mon.vm01 (mon.0) 857 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:31 vm01 bash[28222]: audit 2026-04-16T19:23:31.131865+0000 mon.vm01 (mon.0) 857 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch 2026-04-16T19:23:31.482 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config 2026-04-16T19:23:31.562 INFO:teuthology.orchestra.run.vm01.stdout:Check while healthy... 2026-04-16T19:23:31.565 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-04-16T19:23:31.565 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed 2026-04-16T19:23:31.565 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-04-16T19:23:31.566 INFO:teuthology.orchestra.run.vm01.stdout:anonymousCheck with each rgw stopped in turn... 
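[editor's note] The curl above is the first health check against the ingress VIP succeeding: 187 bytes received, and what appears on stdout is just "anonymous" — likely the text content of RGW's anonymous-user ListAllMyBucketsResult with the XML markup lost in capture — running straight into the next echo. The rgw loop that the heavily re-quoted DEBUG lines above encode de-quotes to the following (a readability reconstruction of the same commands, with {{VIP0}} already expanded to 12.12.1.101; the haproxy loop that follows it is identical apart from grepping for ^haproxy.rgw.foo. and using --daemon-type haproxy):

  # stop each rgw in turn
  echo "Check with each rgw stopped in turn..."
  for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
    ceph orch daemon stop $rgw
    timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
    timeout 300 bash -c "while ! curl http://12.12.1.101:9000/ ; do echo 'Waiting for http://12.12.1.101:9000/ to be available'; sleep 1 ; done"
    ceph orch daemon start $rgw
    timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
  done

The "Scheduled to stop rgw.foo.vm01.pktgwy" and "Waiting for rgw.foo.vm01.pktgwy to stop" lines that follow are the first iteration of exactly this loop.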
2026-04-16T19:23:32.076 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop rgw.foo.vm01.pktgwy on host 'vm01' 2026-04-16T19:23:32.299 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.pktgwy to stop 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: audit 2026-04-16T19:23:31.129358+0000 mgr.vm01.nwhpas (mgr.14227) 167 : audit [DBG] from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: audit 2026-04-16T19:23:31.129358+0000 mgr.vm01.nwhpas (mgr.14227) 167 : audit [DBG] from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: cluster 2026-04-16T19:23:31.277071+0000 mgr.vm01.nwhpas (mgr.14227) 168 : cluster [DBG] pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.3 KiB/s rd, 170 B/s wr, 12 op/s 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: cluster 2026-04-16T19:23:31.277071+0000 mgr.vm01.nwhpas (mgr.14227) 168 : cluster [DBG] pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.3 KiB/s rd, 170 B/s wr, 12 op/s 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: audit 2026-04-16T19:23:32.069259+0000 mon.vm01 (mon.0) 858 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: audit 2026-04-16T19:23:32.069259+0000 mon.vm01 (mon.0) 858 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: audit 2026-04-16T19:23:32.075176+0000 mon.vm01 (mon.0) 859 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:32 vm04 bash[34817]: audit 2026-04-16T19:23:32.075176+0000 mon.vm01 (mon.0) 859 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: audit 2026-04-16T19:23:31.129358+0000 mgr.vm01.nwhpas (mgr.14227) 167 : audit [DBG] from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: audit 2026-04-16T19:23:31.129358+0000 mgr.vm01.nwhpas (mgr.14227) 167 : audit [DBG] from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: cluster 2026-04-16T19:23:31.277071+0000 mgr.vm01.nwhpas (mgr.14227) 168 : cluster [DBG] pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.3 KiB/s rd, 170 B/s wr, 12 op/s 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: cluster 2026-04-16T19:23:31.277071+0000 mgr.vm01.nwhpas (mgr.14227) 
168 : cluster [DBG] pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.3 KiB/s rd, 170 B/s wr, 12 op/s 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: audit 2026-04-16T19:23:32.069259+0000 mon.vm01 (mon.0) 858 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: audit 2026-04-16T19:23:32.069259+0000 mon.vm01 (mon.0) 858 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: audit 2026-04-16T19:23:32.075176+0000 mon.vm01 (mon.0) 859 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:32 vm01 bash[28222]: audit 2026-04-16T19:23:32.075176+0000 mon.vm01 (mon.0) 859 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:32.492 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:23:32.492 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (29s) 3s ago 29s 97.1M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 3ec7963e1b19 2026-04-16T19:23:32.492 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (27s) 3s ago 27s 96.8M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84 2026-04-16T19:23:32.492 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (28s) 4s ago 28s 96.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:23:32.492 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (30s) 4s ago 30s 97.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:23:32.781 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:31.858538+0000 mgr.vm01.nwhpas (mgr.14227) 169 : audit [DBG] from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:31.858538+0000 mgr.vm01.nwhpas (mgr.14227) 169 : audit [DBG] from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:32.061612+0000 mgr.vm01.nwhpas (mgr.14227) 170 : audit [DBG] from='client.14706 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:32.061612+0000 mgr.vm01.nwhpas (mgr.14227) 170 : audit [DBG] from='client.14706 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: cephadm 2026-04-16T19:23:32.062267+0000 mgr.vm01.nwhpas (mgr.14227) 171 : cephadm [INF] Schedule stop daemon 
rgw.foo.vm01.pktgwy
2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:32.279916+0000 mgr.vm01.nwhpas (mgr.14227) 172 : audit [DBG] from='client.14710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:32.487724+0000 mgr.vm01.nwhpas (mgr.14227) 173 : audit [DBG] from='client.14714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:33.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:33 vm04 bash[34817]: audit 2026-04-16T19:23:32.781147+0000 mon.vm01 (mon.0) 860 : audit [DBG] from='client.? 192.168.123.101:0/1855759942' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:23:33.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:33 vm01 bash[28222]: audit 2026-04-16T19:23:31.858538+0000 mgr.vm01.nwhpas (mgr.14227) 169 : audit [DBG] from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:33.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:33 vm01 bash[28222]: audit 2026-04-16T19:23:32.061612+0000 mgr.vm01.nwhpas (mgr.14227) 170 : audit [DBG] from='client.14706 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:33.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:33 vm01 bash[28222]: cephadm 2026-04-16T19:23:32.062267+0000 mgr.vm01.nwhpas (mgr.14227) 171 : cephadm [INF] Schedule stop daemon rgw.foo.vm01.pktgwy
2026-04-16T19:23:33.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:33 vm01 bash[28222]: audit 2026-04-16T19:23:32.279916+0000 mgr.vm01.nwhpas (mgr.14227) 172 : audit [DBG] from='client.14710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:33.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:33 vm01 bash[28222]: audit 2026-04-16T19:23:32.487724+0000 mgr.vm01.nwhpas (mgr.14227) 173 : audit [DBG] from='client.14714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:33.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:33 vm01 bash[28222]: audit 2026-04-16T19:23:32.781147+0000 mon.vm01 (mon.0) 860 : audit [DBG] from='client.? 192.168.123.101:0/1855759942' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:23:34.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:34 vm04 bash[34817]: cluster 2026-04-16T19:23:33.277398+0000 mgr.vm01.nwhpas (mgr.14227) 174 : cluster [DBG] pgmap v79: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 170 B/s wr, 15 op/s
2026-04-16T19:23:34.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:34 vm01 bash[28222]: cluster 2026-04-16T19:23:33.277398+0000 mgr.vm01.nwhpas (mgr.14227) 174 : cluster [DBG] pgmap v79: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 170 B/s wr, 15 op/s
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:35.050212+0000 mon.vm01 (mon.0) 861 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:35.054144+0000 mon.vm01 (mon.0) 862 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: cluster 2026-04-16T19:23:35.277845+0000 mgr.vm01.nwhpas (mgr.14227) 175 : cluster [DBG] pgmap v80: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:35.635294+0000 mon.vm01 (mon.0) 863 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:35.641828+0000 mon.vm01 (mon.0) 864 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:35.642961+0000 mon.vm01 (mon.0) 865 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:35.643577+0000 mon.vm01 (mon.0) 866 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: cephadm 2026-04-16T19:23:35.646580+0000 mgr.vm01.nwhpas (mgr.14227) 176 : cephadm [INF] Checking dashboard <-> RGW credentials
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:36.042703+0000 mon.vm01 (mon.0) 867 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.328 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:36 vm01 bash[28222]: audit 2026-04-16T19:23:36.044679+0000 mon.vm01 (mon.0) 868 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:35.050212+0000 mon.vm01 (mon.0) 861 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:35.054144+0000 mon.vm01 (mon.0) 862 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: cluster 2026-04-16T19:23:35.277845+0000 mgr.vm01.nwhpas (mgr.14227) 175 : cluster [DBG] pgmap v80: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:35.635294+0000 mon.vm01 (mon.0) 863 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:35.641828+0000 mon.vm01 (mon.0) 864 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:35.642961+0000 mon.vm01 (mon.0) 865 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:36.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:35.643577+0000 mon.vm01 (mon.0) 866 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:36.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: cephadm 2026-04-16T19:23:35.646580+0000 mgr.vm01.nwhpas (mgr.14227) 176 : cephadm [INF] Checking dashboard <-> RGW credentials
2026-04-16T19:23:36.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:36.042703+0000 mon.vm01 (mon.0) 867 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:36.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:36 vm04 bash[34817]: audit 2026-04-16T19:23:36.044679+0000 mon.vm01 (mon.0) 868 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:37.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:37 vm01 bash[28222]: audit 2026-04-16T19:23:37.266001+0000 mon.vm01 (mon.0) 869 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [0, 4]} : dispatch
2026-04-16T19:23:37.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:37 vm01 bash[28222]: audit 2026-04-16T19:23:37.266143+0000 mon.vm01 (mon.0) 870 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.12", "id": [1, 5]} : dispatch
2026-04-16T19:23:37.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:37 vm01 bash[28222]: audit 2026-04-16T19:23:37.266356+0000 mon.vm01 (mon.0) 871 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 0]} : dispatch
2026-04-16T19:23:37.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:37 vm01 bash[28222]: audit 2026-04-16T19:23:37.266531+0000 mon.vm01 (mon.0) 872 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1a", "id": [4, 2]} : dispatch
2026-04-16T19:23:37.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:37 vm01 bash[28222]: audit 2026-04-16T19:23:37.266731+0000 mon.vm01 (mon.0) 873 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1e", "id": [1, 2]} : dispatch
2026-04-16T19:23:37.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:37 vm04 bash[34817]: audit 2026-04-16T19:23:37.266001+0000 mon.vm01 (mon.0) 869 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [0, 4]} : dispatch
2026-04-16T19:23:37.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:37 vm04 bash[34817]: audit 2026-04-16T19:23:37.266143+0000 mon.vm01 (mon.0) 870 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.12", "id": [1, 5]} : dispatch
2026-04-16T19:23:37.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:37 vm04 bash[34817]: audit 2026-04-16T19:23:37.266356+0000 mon.vm01 (mon.0) 871 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 0]} : dispatch
2026-04-16T19:23:37.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:37 vm04 bash[34817]: audit 2026-04-16T19:23:37.266531+0000 mon.vm01 (mon.0) 872 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1a", "id": [4, 2]} : dispatch
2026-04-16T19:23:37.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:37 vm04 bash[34817]: audit 2026-04-16T19:23:37.266731+0000 mon.vm01 (mon.0) 873 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1e", "id": [1, 2]} : dispatch
2026-04-16T19:23:38.005 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.pktgwy to stop
2026-04-16T19:23:38.215 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:23:38.215 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (34s) 2s ago 34s 97.6M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 3ec7963e1b19
2026-04-16T19:23:38.215 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (32s) 2s ago 32s 97.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:23:38.215 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (33s) 3s ago 33s 97.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:23:38.216 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (35s) 3s ago 35s 97.8M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:23:38.494 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:23:38.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: cluster 2026-04-16T19:23:37.278243+0000 mgr.vm01.nwhpas (mgr.14227) 177 : cluster [DBG] pgmap v81: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-16T19:23:38.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: audit 2026-04-16T19:23:37.362707+0000 mon.vm01 (mon.0) 874 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [0, 4]}]': finished
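The "Waiting for rgw.foo.vm01.pktgwy to stop" line and the surrounding orch ps / health detail dispatches come from the test's poll loop. A minimal sketch of that pattern, reconstructed from the commands visible in the audit entries above (not the verbatim task script):

    ceph orch daemon stop rgw.foo.vm01.pktgwy
    # Poll until the orchestrator reports the daemon as stopped, dumping
    # the rgw daemon list and cluster health on every iteration.
    timeout 300 bash -c "while ! ceph orch ps | grep rgw.foo.vm01.pktgwy | grep stopped; do
        echo 'Waiting for rgw.foo.vm01.pktgwy to stop'
        ceph orch ps --daemon-type rgw
        ceph health detail
        sleep 5
    done"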
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: audit 2026-04-16T19:23:37.362749+0000 mon.vm01 (mon.0) 875 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.12", "id": [1, 5]}]': finished
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: audit 2026-04-16T19:23:37.362771+0000 mon.vm01 (mon.0) 876 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 0]}]': finished
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: audit 2026-04-16T19:23:37.362791+0000 mon.vm01 (mon.0) 877 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1a", "id": [4, 2]}]': finished
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: audit 2026-04-16T19:23:37.362811+0000 mon.vm01 (mon.0) 878 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1e", "id": [1, 2]}]': finished
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: cluster 2026-04-16T19:23:37.369194+0000 mon.vm01 (mon.0) 879 : cluster [DBG] osdmap e34: 8 total, 8 up, 8 in
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: audit 2026-04-16T19:23:37.560362+0000 mon.vm01 (mon.0) 880 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:23:38.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:38 vm04 bash[34817]: cluster 2026-04-16T19:23:38.368753+0000 mon.vm01 (mon.0) 881 : cluster [DBG] osdmap e35: 8 total, 8 up, 8 in
2026-04-16T19:23:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: cluster 2026-04-16T19:23:37.278243+0000 mgr.vm01.nwhpas (mgr.14227) 177 : cluster [DBG] pgmap v81: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-16T19:23:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: audit 2026-04-16T19:23:37.362707+0000 mon.vm01 (mon.0) 874 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.9", "id": [0, 4]}]': finished
2026-04-16T19:23:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: audit 2026-04-16T19:23:37.362749+0000 mon.vm01 (mon.0) 875 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "3.12", "id": [1, 5]}]': finished
2026-04-16T19:23:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: audit 2026-04-16T19:23:37.362771+0000 mon.vm01 (mon.0) 876 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.18", "id": [1, 0]}]': finished
2026-04-16T19:23:38.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: audit 2026-04-16T19:23:37.362791+0000 mon.vm01 (mon.0) 877 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1a", "id": [4, 2]}]': finished
2026-04-16T19:23:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: audit 2026-04-16T19:23:37.362811+0000 mon.vm01 (mon.0) 878 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1e", "id": [1, 2]}]': finished
2026-04-16T19:23:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: cluster 2026-04-16T19:23:37.369194+0000 mon.vm01 (mon.0) 879 : cluster [DBG] osdmap e34: 8 total, 8 up, 8 in
2026-04-16T19:23:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: audit 2026-04-16T19:23:37.560362+0000 mon.vm01 (mon.0) 880 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:23:38.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:38 vm01 bash[28222]: cluster 2026-04-16T19:23:38.368753+0000 mon.vm01 (mon.0) 881 : cluster [DBG] osdmap e35: 8 total, 8 up, 8 in
2026-04-16T19:23:39.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:39 vm04 bash[34817]: audit 2026-04-16T19:23:37.986040+0000 mgr.vm01.nwhpas (mgr.14227) 178 : audit [DBG] from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:39.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:39 vm04 bash[34817]: audit 2026-04-16T19:23:38.210523+0000 mgr.vm01.nwhpas (mgr.14227) 179 : audit [DBG] from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
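The osd pg-upmap-items commands above were issued by the mgr on its own (most likely the balancer module in upmap mode); each one pins a PG to a different OSD, and the batch is committed in the next osdmap epochs, which is why e34 and e35 follow immediately. The equivalent manual CLI form, shown for illustration only:

    # Mirror of audit entry 869: remap pg 2.9 so that osd.0's slot is taken by osd.4.
    ceph osd pg-upmap-items 2.9 0 4
    # The resulting exceptions are visible in the osdmap.
    ceph osd dump | grep pg_upmap_items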
2026-04-16T19:23:39.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:39 vm04 bash[34817]: audit 2026-04-16T19:23:38.493740+0000 mon.vm01 (mon.0) 882 : audit [DBG] from='client.? 192.168.123.101:0/377699935' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:23:39.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:39 vm01 bash[28222]: audit 2026-04-16T19:23:37.986040+0000 mgr.vm01.nwhpas (mgr.14227) 178 : audit [DBG] from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:39.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:39 vm01 bash[28222]: audit 2026-04-16T19:23:38.210523+0000 mgr.vm01.nwhpas (mgr.14227) 179 : audit [DBG] from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:39.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:39 vm01 bash[28222]: audit 2026-04-16T19:23:38.493740+0000 mon.vm01 (mon.0) 882 : audit [DBG] from='client.? 192.168.123.101:0/377699935' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:23:40.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:40 vm04 bash[34817]: cluster 2026-04-16T19:23:39.278708+0000 mgr.vm01.nwhpas (mgr.14227) 180 : cluster [DBG] pgmap v84: 129 pgs: 2 peering, 127 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 255 B/s wr, 23 op/s
2026-04-16T19:23:40.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:40 vm01 bash[28222]: cluster 2026-04-16T19:23:39.278708+0000 mgr.vm01.nwhpas (mgr.14227) 180 : cluster [DBG] pgmap v84: 129 pgs: 2 peering, 127 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 255 B/s wr, 23 op/s
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: cluster 2026-04-16T19:23:41.279157+0000 mgr.vm01.nwhpas (mgr.14227) 181 : cluster [DBG] pgmap v85: 129 pgs: 2 peering, 127 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 255 B/s wr, 28 op/s
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: audit 2026-04-16T19:23:42.150126+0000 mon.vm01 (mon.0) 883 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: audit 2026-04-16T19:23:42.156783+0000 mon.vm01 (mon.0) 884 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: audit 2026-04-16T19:23:42.165631+0000 mon.vm01 (mon.0) 885 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: audit 2026-04-16T19:23:42.170715+0000 mon.vm01 (mon.0) 886 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: audit 2026-04-16T19:23:42.173707+0000 mon.vm01 (mon.0) 887 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-16T19:23:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:42 vm04 bash[34817]: audit 2026-04-16T19:23:42.175206+0000 mon.vm01 (mon.0) 888 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: cluster 2026-04-16T19:23:41.279157+0000 mgr.vm01.nwhpas (mgr.14227) 181 : cluster [DBG] pgmap v85: 129 pgs: 2 peering, 127 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 255 B/s wr, 28 op/s
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: audit 2026-04-16T19:23:42.150126+0000 mon.vm01 (mon.0) 883 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: audit 2026-04-16T19:23:42.156783+0000 mon.vm01 (mon.0) 884 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: audit 2026-04-16T19:23:42.165631+0000 mon.vm01 (mon.0) 885 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: audit 2026-04-16T19:23:42.170715+0000 mon.vm01 (mon.0) 886 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: audit 2026-04-16T19:23:42.173707+0000 mon.vm01 (mon.0) 887 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-16T19:23:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:42 vm01 bash[28222]: audit 2026-04-16T19:23:42.175206+0000 mon.vm01 (mon.0) 888 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:23:43.727 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.pktgwy to stop
2026-04-16T19:23:43.923 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:23:43.923 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (40s) 8s ago 40s 97.6M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 3ec7963e1b19
2026-04-16T19:23:43.923 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (38s) 8s ago 38s 97.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:23:43.923 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (39s) 8s ago 39s 97.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:23:43.923 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (41s) 8s ago 41s 97.8M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:23:44.163 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:23:44.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:43 vm04 bash[34817]: audit 2026-04-16T19:23:42.174135+0000 mgr.vm01.nwhpas (mgr.14227) 182 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
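Note that the stop was dispatched roughly ten seconds earlier, yet orch ps still reports rgw.foo.vm01.pktgwy as running (40s). The STATUS column reflects the orchestrator's cached inventory; the REFRESHED column (8s ago) shows how stale that cache is, so the loop simply keeps polling until a refresh observes the stopped container. When debugging interactively, a refresh can be requested explicitly (an aside, not part of this job):

    ceph orch ps --daemon-type rgw --refresh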
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-16T19:23:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:43 vm01 bash[28222]: audit 2026-04-16T19:23:42.174135+0000 mgr.vm01.nwhpas (mgr.14227) 182 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-16T19:23:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:43 vm01 bash[28222]: audit 2026-04-16T19:23:42.174135+0000 mgr.vm01.nwhpas (mgr.14227) 182 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-16T19:23:45.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:44 vm04 bash[34817]: cluster 2026-04-16T19:23:43.279587+0000 mgr.vm01.nwhpas (mgr.14227) 183 : cluster [DBG] pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:45.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:44 vm04 bash[34817]: cluster 2026-04-16T19:23:43.279587+0000 mgr.vm01.nwhpas (mgr.14227) 183 : cluster [DBG] pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:45.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:44 vm04 bash[34817]: audit 2026-04-16T19:23:43.708467+0000 mgr.vm01.nwhpas (mgr.14227) 184 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:45.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:44 vm04 bash[34817]: audit 2026-04-16T19:23:43.708467+0000 mgr.vm01.nwhpas (mgr.14227) 184 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:45.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:44 vm04 bash[34817]: audit 2026-04-16T19:23:44.163109+0000 mon.vm01 (mon.0) 889 : audit [DBG] from='client.? 192.168.123.101:0/2843808079' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:45.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:44 vm04 bash[34817]: audit 2026-04-16T19:23:44.163109+0000 mon.vm01 (mon.0) 889 : audit [DBG] from='client.? 
192.168.123.101:0/2843808079' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:45.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:44 vm01 bash[28222]: cluster 2026-04-16T19:23:43.279587+0000 mgr.vm01.nwhpas (mgr.14227) 183 : cluster [DBG] pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:45.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:44 vm01 bash[28222]: cluster 2026-04-16T19:23:43.279587+0000 mgr.vm01.nwhpas (mgr.14227) 183 : cluster [DBG] pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:45.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:44 vm01 bash[28222]: audit 2026-04-16T19:23:43.708467+0000 mgr.vm01.nwhpas (mgr.14227) 184 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:45.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:44 vm01 bash[28222]: audit 2026-04-16T19:23:43.708467+0000 mgr.vm01.nwhpas (mgr.14227) 184 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:45.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:44 vm01 bash[28222]: audit 2026-04-16T19:23:44.163109+0000 mon.vm01 (mon.0) 889 : audit [DBG] from='client.? 192.168.123.101:0/2843808079' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:45.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:44 vm01 bash[28222]: audit 2026-04-16T19:23:44.163109+0000 mon.vm01 (mon.0) 889 : audit [DBG] from='client.? 
192.168.123.101:0/2843808079' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:46.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:45 vm04 bash[34817]: audit 2026-04-16T19:23:43.920124+0000 mgr.vm01.nwhpas (mgr.14227) 185 : audit [DBG] from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:46.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:45 vm04 bash[34817]: audit 2026-04-16T19:23:43.920124+0000 mgr.vm01.nwhpas (mgr.14227) 185 : audit [DBG] from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:46.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:45 vm01 bash[28222]: audit 2026-04-16T19:23:43.920124+0000 mgr.vm01.nwhpas (mgr.14227) 185 : audit [DBG] from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:46.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:45 vm01 bash[28222]: audit 2026-04-16T19:23:43.920124+0000 mgr.vm01.nwhpas (mgr.14227) 185 : audit [DBG] from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:46.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:46 vm01 bash[28222]: cluster 2026-04-16T19:23:45.280038+0000 mgr.vm01.nwhpas (mgr.14227) 186 : cluster [DBG] pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:46.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:46 vm01 bash[28222]: cluster 2026-04-16T19:23:45.280038+0000 mgr.vm01.nwhpas (mgr.14227) 186 : cluster [DBG] pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:47.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:46 vm04 bash[34817]: cluster 2026-04-16T19:23:45.280038+0000 mgr.vm01.nwhpas (mgr.14227) 186 : cluster [DBG] pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:47.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:46 vm04 bash[34817]: cluster 2026-04-16T19:23:45.280038+0000 mgr.vm01.nwhpas (mgr.14227) 186 : cluster [DBG] pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 9.9 KiB/s rd, 1023 B/s wr, 17 op/s; 32 B/s, 0 objects/s recovering 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.015965+0000 mon.vm01 (mon.0) 890 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.015965+0000 mon.vm01 (mon.0) 890 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.021420+0000 mon.vm01 (mon.0) 891 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 
16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.021420+0000 mon.vm01 (mon.0) 891 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: cluster 2026-04-16T19:23:47.280460+0000 mgr.vm01.nwhpas (mgr.14227) 187 : cluster [DBG] pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.0 KiB/s rd, 826 B/s wr, 14 op/s; 26 B/s, 0 objects/s recovering 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: cluster 2026-04-16T19:23:47.280460+0000 mgr.vm01.nwhpas (mgr.14227) 187 : cluster [DBG] pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.0 KiB/s rd, 826 B/s wr, 14 op/s; 26 B/s, 0 objects/s recovering 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.577397+0000 mon.vm01 (mon.0) 892 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.577397+0000 mon.vm01 (mon.0) 892 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.629859+0000 mon.vm01 (mon.0) 893 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.629859+0000 mon.vm01 (mon.0) 893 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.630790+0000 mon.vm01 (mon.0) 894 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.630790+0000 mon.vm01 (mon.0) 894 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.631312+0000 mon.vm01 (mon.0) 895 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.631312+0000 mon.vm01 (mon.0) 895 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.634599+0000 mon.vm01 (mon.0) 896 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.634599+0000 mon.vm01 (mon.0) 896 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
2026-04-16T19:23:48.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:48 vm04 bash[34817]: audit 2026-04-16T19:23:47.636222+0000 mon.vm01 (mon.0) 897 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:48.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.015965+0000 mon.vm01 (mon.0) 890 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.021420+0000 mon.vm01 (mon.0) 891 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: cluster 2026-04-16T19:23:47.280460+0000 mgr.vm01.nwhpas (mgr.14227) 187 : cluster [DBG] pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.0 KiB/s rd, 826 B/s wr, 14 op/s; 26 B/s, 0 objects/s recovering
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.577397+0000 mon.vm01 (mon.0) 892 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.629859+0000 mon.vm01 (mon.0) 893 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.630790+0000 mon.vm01 (mon.0) 894 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.631312+0000 mon.vm01 (mon.0) 895 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.634599+0000 mon.vm01 (mon.0) 896 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:48.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:48 vm01 bash[28222]: audit 2026-04-16T19:23:47.636222+0000 mon.vm01 (mon.0) 897 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:49.385 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 stopped 1s ago 46s - -
2026-04-16T19:23:49.391 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:23:49.392 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:23:49.397 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 37400 0 --:--:-- --:--:-- --:--:-- 37400
2026-04-16T19:23:49.605 INFO:teuthology.orchestra.run.vm01.stdout:anonymousScheduled to start rgw.foo.vm01.pktgwy on host 'vm01'
2026-04-16T19:23:49.833 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.pktgwy to start
INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (47s) 3s ago 47s 98.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:23:50.274 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:23:50.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: cluster 2026-04-16T19:23:49.280929+0000 mgr.vm01.nwhpas (mgr.14227) 188 : cluster [DBG] pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 562 B/s wr, 12 op/s; 24 B/s, 0 objects/s recovering 2026-04-16T19:23:50.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: cluster 2026-04-16T19:23:49.280929+0000 mgr.vm01.nwhpas (mgr.14227) 188 : cluster [DBG] pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 562 B/s wr, 12 op/s; 24 B/s, 0 objects/s recovering 2026-04-16T19:23:50.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.366147+0000 mgr.vm01.nwhpas (mgr.14227) 189 : audit [DBG] from='client.14770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.366147+0000 mgr.vm01.nwhpas (mgr.14227) 189 : audit [DBG] from='client.14770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.590380+0000 mgr.vm01.nwhpas (mgr.14227) 190 : audit [DBG] from='client.14774 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.590380+0000 mgr.vm01.nwhpas (mgr.14227) 190 : audit [DBG] from='client.14774 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: cephadm 2026-04-16T19:23:49.590745+0000 mgr.vm01.nwhpas (mgr.14227) 191 : cephadm [INF] Schedule start daemon rgw.foo.vm01.pktgwy 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: cephadm 2026-04-16T19:23:49.590745+0000 mgr.vm01.nwhpas (mgr.14227) 191 : cephadm [INF] Schedule start daemon rgw.foo.vm01.pktgwy 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.597593+0000 mon.vm01 (mon.0) 898 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.597593+0000 mon.vm01 (mon.0) 898 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.603600+0000 mon.vm01 (mon.0) 899 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.603600+0000 mon.vm01 (mon.0) 899 : 
audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.604564+0000 mon.vm01 (mon.0) 900 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.604564+0000 mon.vm01 (mon.0) 900 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.815469+0000 mgr.vm01.nwhpas (mgr.14227) 192 : audit [DBG] from='client.14778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:49.815469+0000 mgr.vm01.nwhpas (mgr.14227) 192 : audit [DBG] from='client.14778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:50.273374+0000 mon.vm01 (mon.0) 901 : audit [DBG] from='client.? 192.168.123.101:0/3455545033' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:50.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:50 vm04 bash[34817]: audit 2026-04-16T19:23:50.273374+0000 mon.vm01 (mon.0) 901 : audit [DBG] from='client.? 192.168.123.101:0/3455545033' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: cluster 2026-04-16T19:23:49.280929+0000 mgr.vm01.nwhpas (mgr.14227) 188 : cluster [DBG] pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 562 B/s wr, 12 op/s; 24 B/s, 0 objects/s recovering 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: cluster 2026-04-16T19:23:49.280929+0000 mgr.vm01.nwhpas (mgr.14227) 188 : cluster [DBG] pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.9 KiB/s rd, 562 B/s wr, 12 op/s; 24 B/s, 0 objects/s recovering 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.366147+0000 mgr.vm01.nwhpas (mgr.14227) 189 : audit [DBG] from='client.14770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.366147+0000 mgr.vm01.nwhpas (mgr.14227) 189 : audit [DBG] from='client.14770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.590380+0000 mgr.vm01.nwhpas (mgr.14227) 190 : audit [DBG] from='client.14774 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.958 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.590380+0000 mgr.vm01.nwhpas (mgr.14227) 190 : audit [DBG] from='client.14774 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm01.pktgwy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: cephadm 2026-04-16T19:23:49.590745+0000 mgr.vm01.nwhpas (mgr.14227) 191 : cephadm [INF] Schedule start daemon rgw.foo.vm01.pktgwy 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: cephadm 2026-04-16T19:23:49.590745+0000 mgr.vm01.nwhpas (mgr.14227) 191 : cephadm [INF] Schedule start daemon rgw.foo.vm01.pktgwy 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.597593+0000 mon.vm01 (mon.0) 898 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.597593+0000 mon.vm01 (mon.0) 898 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.603600+0000 mon.vm01 (mon.0) 899 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.603600+0000 mon.vm01 (mon.0) 899 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.604564+0000 mon.vm01 (mon.0) 900 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.604564+0000 mon.vm01 (mon.0) 900 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.815469+0000 mgr.vm01.nwhpas (mgr.14227) 192 : audit [DBG] from='client.14778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:49.815469+0000 mgr.vm01.nwhpas (mgr.14227) 192 : audit [DBG] from='client.14778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:50.273374+0000 mon.vm01 (mon.0) 901 : audit [DBG] from='client.? 192.168.123.101:0/3455545033' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:23:50.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:50 vm01 bash[28222]: audit 2026-04-16T19:23:50.273374+0000 mon.vm01 (mon.0) 901 : audit [DBG] from='client.? 
2026-04-16T19:23:51.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:51 vm04 bash[34817]: audit 2026-04-16T19:23:50.033891+0000 mgr.vm01.nwhpas (mgr.14227) 193 : audit [DBG] from='client.14782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:51.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:51 vm01 bash[28222]: audit 2026-04-16T19:23:50.033891+0000 mgr.vm01.nwhpas (mgr.14227) 193 : audit [DBG] from='client.14782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:52.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:52 vm04 bash[34817]: cluster 2026-04-16T19:23:51.281442+0000 mgr.vm01.nwhpas (mgr.14227) 194 : cluster [DBG] pgmap v90: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 511 B/s wr, 7 op/s; 21 B/s, 0 objects/s recovering
2026-04-16T19:23:52.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:52 vm04 bash[34817]: audit 2026-04-16T19:23:52.560604+0000 mon.vm01 (mon.0) 902 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:23:52.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:52 vm01 bash[28222]: cluster 2026-04-16T19:23:51.281442+0000 mgr.vm01.nwhpas (mgr.14227) 194 : cluster [DBG] pgmap v90: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 511 B/s wr, 7 op/s; 21 B/s, 0 objects/s recovering
2026-04-16T19:23:52.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:52 vm01 bash[28222]: audit 2026-04-16T19:23:52.560604+0000 mon.vm01 (mon.0) 902 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:23:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:54 vm04 bash[34817]: cluster 2026-04-16T19:23:53.281852+0000 mgr.vm01.nwhpas (mgr.14227) 195 : cluster [DBG] pgmap v91: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s; 21 B/s, 0 objects/s recovering
2026-04-16T19:23:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:54 vm04 bash[34817]: audit 2026-04-16T19:23:54.448967+0000 mon.vm01 (mon.0) 903 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:54.956 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:54 vm04 bash[34817]: audit 2026-04-16T19:23:54.454522+0000 mon.vm01 (mon.0) 904 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:54.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:54 vm01 bash[28222]: cluster 2026-04-16T19:23:53.281852+0000 mgr.vm01.nwhpas (mgr.14227) 195 : cluster [DBG] pgmap v91: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s; 21 B/s, 0 objects/s recovering
2026-04-16T19:23:54.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:54 vm01 bash[28222]: audit 2026-04-16T19:23:54.448967+0000 mon.vm01 (mon.0) 903 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:54.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:54 vm01 bash[28222]: audit 2026-04-16T19:23:54.454522+0000 mon.vm01 (mon.0) 904 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:55.488 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.pktgwy to start
2026-04-16T19:23:55.700 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:23:55.700 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 stopped 0s ago 52s - -
2026-04-16T19:23:55.700 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (50s) 0s ago 50s 98.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:23:55.700 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (51s) 1s ago 51s 99.0M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:23:55.700 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (53s) 1s ago 53s 99.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:23:55.986 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:23:56.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:54.976853+0000 mon.vm01 (mon.0) 905 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:54.981875+0000 mon.vm01 (mon.0) 906 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:54.982791+0000 mon.vm01 (mon.0) 907 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:54.983335+0000 mon.vm01 (mon.0) 908 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:54.986719+0000 mon.vm01 (mon.0) 909 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:54.988370+0000 mon.vm01 (mon.0) 910 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: cluster 2026-04-16T19:23:55.282286+0000 mgr.vm01.nwhpas (mgr.14227) 196 : cluster [DBG] pgmap v92: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:55.465326+0000 mgr.vm01.nwhpas (mgr.14227) 197 : audit [DBG] from='client.14790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:55.696599+0000 mgr.vm01.nwhpas (mgr.14227) 198 : audit [DBG] from='client.14794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:55.710059+0000 mon.vm01 (mon.0) 911 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:55.785850+0000 mon.vm01 (mon.0) 912 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:55 vm01 bash[28222]: audit 2026-04-16T19:23:55.788736+0000 mon.vm01 (mon.0) 913 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:23:56.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:54.976853+0000 mon.vm01 (mon.0) 905 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:54.981875+0000 mon.vm01 (mon.0) 906 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:54.982791+0000 mon.vm01 (mon.0) 907 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:54.983335+0000 mon.vm01 (mon.0) 908 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:54.986719+0000 mon.vm01 (mon.0) 909 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:54.988370+0000 mon.vm01 (mon.0) 910 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: cluster 2026-04-16T19:23:55.282286+0000 mgr.vm01.nwhpas (mgr.14227) 196 : cluster [DBG] pgmap v92: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:55.465326+0000 mgr.vm01.nwhpas (mgr.14227) 197 : audit [DBG] from='client.14790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:55.696599+0000 mgr.vm01.nwhpas (mgr.14227) 198 : audit [DBG] from='client.14794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:55.710059+0000 mon.vm01 (mon.0) 911 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:55.785850+0000 mon.vm01 (mon.0) 912 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:23:56.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:55 vm04 bash[34817]: audit 2026-04-16T19:23:55.788736+0000 mon.vm01 (mon.0) 913 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:23:57.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:56 vm04 bash[34817]: audit 2026-04-16T19:23:55.983668+0000 mon.vm01 (mon.0) 914 : audit [DBG] from='client.? 192.168.123.101:0/355338500' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:23:57.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:56 vm01 bash[28222]: audit 2026-04-16T19:23:55.983668+0000 mon.vm01 (mon.0) 914 : audit [DBG] from='client.? 192.168.123.101:0/355338500' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:23:58.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:23:58 vm04 bash[34817]: cluster 2026-04-16T19:23:57.282822+0000 mgr.vm01.nwhpas (mgr.14227) 199 : cluster [DBG] pgmap v93: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:23:58.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:23:58 vm01 bash[28222]: cluster 2026-04-16T19:23:57.282822+0000 mgr.vm01.nwhpas (mgr.14227) 199 : cluster [DBG] pgmap v93: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:00.352 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:00 vm01 bash[28222]: cluster 2026-04-16T19:23:59.283262+0000 mgr.vm01.nwhpas (mgr.14227) 200 : cluster [DBG] pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 57 op/s
2026-04-16T19:24:00.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:00 vm04 bash[34817]: cluster 2026-04-16T19:23:59.283262+0000 mgr.vm01.nwhpas (mgr.14227) 200 : cluster [DBG] pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 57 op/s
129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 57 op/s 2026-04-16T19:24:00.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:00 vm04 bash[34817]: cluster 2026-04-16T19:23:59.283262+0000 mgr.vm01.nwhpas (mgr.14227) 200 : cluster [DBG] pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 0 B/s wr, 57 op/s 2026-04-16T19:24:01.235 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.pktgwy to start 2026-04-16T19:24:01.424 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:24:01.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5s) 0s ago 58s 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:24:01.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (55s) 0s ago 56s 99.1M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84 2026-04-16T19:24:01.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (57s) 0s ago 57s 99.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:24:01.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (59s) 0s ago 59s 99.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:24:01.661 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:00.767203+0000 mon.vm01 (mon.0) 915 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:00.767203+0000 mon.vm01 (mon.0) 915 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:00.773128+0000 mon.vm01 (mon.0) 916 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:00.773128+0000 mon.vm01 (mon.0) 916 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.319036+0000 mon.vm01 (mon.0) 917 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.319036+0000 mon.vm01 (mon.0) 917 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.326490+0000 mon.vm01 (mon.0) 918 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.326490+0000 mon.vm01 (mon.0) 918 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.659988+0000 
mon.vm04 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.101:0/2421373152' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.659988+0000 mon.vm04 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.101:0/2421373152' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.689852+0000 mon.vm01 (mon.0) 919 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.689852+0000 mon.vm01 (mon.0) 919 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.690595+0000 mon.vm01 (mon.0) 920 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.690595+0000 mon.vm01 (mon.0) 920 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.696360+0000 mon.vm01 (mon.0) 921 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.696360+0000 mon.vm01 (mon.0) 921 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.698563+0000 mon.vm01 (mon.0) 922 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:24:02.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:01 vm04 bash[34817]: audit 2026-04-16T19:24:01.698563+0000 mon.vm01 (mon.0) 922 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:00.767203+0000 mon.vm01 (mon.0) 915 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:00.767203+0000 mon.vm01 (mon.0) 915 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:00.773128+0000 mon.vm01 (mon.0) 916 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:00.773128+0000 mon.vm01 (mon.0) 916 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.319036+0000 mon.vm01 (mon.0) 917 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.319036+0000 mon.vm01 (mon.0) 917 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.326490+0000 mon.vm01 (mon.0) 918 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.326490+0000 mon.vm01 (mon.0) 918 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.659988+0000 mon.vm04 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.101:0/2421373152' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.659988+0000 mon.vm04 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.101:0/2421373152' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.689852+0000 mon.vm01 (mon.0) 919 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:24:02.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.689852+0000 mon.vm01 (mon.0) 919 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:24:02.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.690595+0000 mon.vm01 (mon.0) 920 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:24:02.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.690595+0000 mon.vm01 (mon.0) 920 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:24:02.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.696360+0000 mon.vm01 (mon.0) 921 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.696360+0000 mon.vm01 (mon.0) 921 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:24:02.209 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.698563+0000 mon.vm01 (mon.0) 922 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:24:02.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:01 vm01 bash[28222]: audit 2026-04-16T19:24:01.698563+0000 mon.vm01 (mon.0) 922 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:24:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:02 vm04 bash[34817]: audit 2026-04-16T19:24:01.210756+0000 mgr.vm01.nwhpas (mgr.14227) 201 : audit [DBG] from='client.14814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:02 vm04 bash[34817]: audit 2026-04-16T19:24:01.210756+0000 mgr.vm01.nwhpas (mgr.14227) 201 : audit [DBG] from='client.14814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:02 vm04 bash[34817]: cluster 2026-04-16T19:24:01.283644+0000 mgr.vm01.nwhpas (mgr.14227) 202 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:02 vm04 bash[34817]: cluster 2026-04-16T19:24:01.283644+0000 mgr.vm01.nwhpas (mgr.14227) 202 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:02 vm04 bash[34817]: audit 2026-04-16T19:24:01.420594+0000 mgr.vm01.nwhpas (mgr.14227) 203 : audit [DBG] from='client.24507 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:02 vm04 bash[34817]: audit 2026-04-16T19:24:01.420594+0000 mgr.vm01.nwhpas (mgr.14227) 203 : audit [DBG] from='client.24507 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:02 vm01 bash[28222]: audit 2026-04-16T19:24:01.210756+0000 mgr.vm01.nwhpas (mgr.14227) 201 : audit [DBG] from='client.14814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:02 vm01 bash[28222]: audit 2026-04-16T19:24:01.210756+0000 mgr.vm01.nwhpas (mgr.14227) 201 : audit [DBG] from='client.14814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:02 vm01 bash[28222]: cluster 2026-04-16T19:24:01.283644+0000 mgr.vm01.nwhpas (mgr.14227) 202 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:03.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:02 vm01 bash[28222]: cluster 2026-04-16T19:24:01.283644+0000 mgr.vm01.nwhpas (mgr.14227) 202 : cluster [DBG] pgmap 
v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:03.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:02 vm01 bash[28222]: audit 2026-04-16T19:24:01.420594+0000 mgr.vm01.nwhpas (mgr.14227) 203 : audit [DBG] from='client.24507 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:03.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:02 vm01 bash[28222]: audit 2026-04-16T19:24:01.420594+0000 mgr.vm01.nwhpas (mgr.14227) 203 : audit [DBG] from='client.24507 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:24:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:03 vm04 bash[34817]: cluster 2026-04-16T19:24:03.283985+0000 mgr.vm01.nwhpas (mgr.14227) 204 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:04.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:03 vm04 bash[34817]: cluster 2026-04-16T19:24:03.283985+0000 mgr.vm01.nwhpas (mgr.14227) 204 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:03 vm01 bash[28222]: cluster 2026-04-16T19:24:03.283985+0000 mgr.vm01.nwhpas (mgr.14227) 204 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:04.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:03 vm01 bash[28222]: cluster 2026-04-16T19:24:03.283985+0000 mgr.vm01.nwhpas (mgr.14227) 204 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:06.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:06 vm04 bash[34817]: cluster 2026-04-16T19:24:05.284377+0000 mgr.vm01.nwhpas (mgr.14227) 205 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:06.706 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:06 vm04 bash[34817]: cluster 2026-04-16T19:24:05.284377+0000 mgr.vm01.nwhpas (mgr.14227) 205 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:06.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:06 vm01 bash[28222]: cluster 2026-04-16T19:24:05.284377+0000 mgr.vm01.nwhpas (mgr.14227) 205 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:06.708 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:06 vm01 bash[28222]: cluster 2026-04-16T19:24:05.284377+0000 mgr.vm01.nwhpas (mgr.14227) 205 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s 2026-04-16T19:24:06.880 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11s) 5s ago 63s 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:24:07.082 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop rgw.foo.vm01.qgurbb on host 'vm01' 
2026-04-16T19:24:07.293 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:07.494 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:07.494 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11s) 6s ago 64s 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:07.494 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (62s) 6s ago 62s 99.1M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:24:07.494 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (63s) 6s ago 63s 99.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:07.494 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (65s) 6s ago 65s 99.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:07.730 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:24:08.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:06.863670+0000 mgr.vm01.nwhpas (mgr.14227) 206 : audit [DBG] from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.456 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.070371+0000 mgr.vm01.nwhpas (mgr.14227) 207 : audit [DBG] from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm01.qgurbb", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: cephadm 2026-04-16T19:24:07.070873+0000 mgr.vm01.nwhpas (mgr.14227) 208 : cephadm [INF] Schedule stop daemon rgw.foo.vm01.qgurbb
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.076862+0000 mon.vm01 (mon.0) 923 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.081812+0000 mon.vm01 (mon.0) 924 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.082536+0000 mon.vm01 (mon.0) 925 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.273724+0000 mgr.vm01.nwhpas (mgr.14227) 209 : audit [DBG] from='client.14828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: cluster 2026-04-16T19:24:07.284774+0000 mgr.vm01.nwhpas (mgr.14227) 210 : cluster [DBG] pgmap v98: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.490321+0000 mgr.vm01.nwhpas (mgr.14227) 211 : audit [DBG] from='client.14832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.563847+0000 mon.vm01 (mon.0) 926 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:08.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:08 vm04 bash[34817]: audit 2026-04-16T19:24:07.730195+0000 mon.vm01 (mon.0) 927 : audit [DBG] from='client.? 192.168.123.101:0/2543091447' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:06.863670+0000 mgr.vm01.nwhpas (mgr.14227) 206 : audit [DBG] from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.070371+0000 mgr.vm01.nwhpas (mgr.14227) 207 : audit [DBG] from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm01.qgurbb", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: cephadm 2026-04-16T19:24:07.070873+0000 mgr.vm01.nwhpas (mgr.14227) 208 : cephadm [INF] Schedule stop daemon rgw.foo.vm01.qgurbb
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.076862+0000 mon.vm01 (mon.0) 923 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.081812+0000 mon.vm01 (mon.0) 924 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.082536+0000 mon.vm01 (mon.0) 925 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.273724+0000 mgr.vm01.nwhpas (mgr.14227) 209 : audit [DBG] from='client.14828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: cluster 2026-04-16T19:24:07.284774+0000 mgr.vm01.nwhpas (mgr.14227) 210 : cluster [DBG] pgmap v98: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.490321+0000 mgr.vm01.nwhpas (mgr.14227) 211 : audit [DBG] from='client.14832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.563847+0000 mon.vm01 (mon.0) 926 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:08.458 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:08 vm01 bash[28222]: audit 2026-04-16T19:24:07.730195+0000 mon.vm01 (mon.0) 927 : audit [DBG] from='client.? 192.168.123.101:0/2543091447' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:11.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:10 vm04 bash[34817]: cluster 2026-04-16T19:24:09.285314+0000 mgr.vm01.nwhpas (mgr.14227) 212 : cluster [DBG] pgmap v99: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-16T19:24:11.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:10 vm01 bash[28222]: cluster 2026-04-16T19:24:09.285314+0000 mgr.vm01.nwhpas (mgr.14227) 212 : cluster [DBG] pgmap v99: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-16T19:24:12.944 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:13.128 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:13.128 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (17s) 0s ago 69s 94.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:13.128 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (67s) 0s ago 67s 99.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:24:13.128 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (68s) 1s ago 68s 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:13.128 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (70s) 1s ago 70s 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:13.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: cluster 2026-04-16T19:24:11.285770+0000 mgr.vm01.nwhpas (mgr.14227) 213 : cluster [DBG] pgmap v100: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 51 op/s
2026-04-16T19:24:13.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:11.947331+0000 mon.vm01 (mon.0) 928 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:11.952216+0000 mon.vm01 (mon.0) 929 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:12.443286+0000 mon.vm01 (mon.0) 930 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:12.448216+0000 mon.vm01 (mon.0) 931 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:12.449002+0000 mon.vm01 (mon.0) 932 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:12.449536+0000 mon.vm01 (mon.0) 933 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:12.452838+0000 mon.vm01 (mon.0) 934 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:12 vm04 bash[34817]: audit 2026-04-16T19:24:12.454221+0000 mon.vm01 (mon.0) 935 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:24:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: cluster 2026-04-16T19:24:11.285770+0000 mgr.vm01.nwhpas (mgr.14227) 213 : cluster [DBG] pgmap v100: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 51 op/s
2026-04-16T19:24:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:11.947331+0000 mon.vm01 (mon.0) 928 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:11.952216+0000 mon.vm01 (mon.0) 929 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:12.443286+0000 mon.vm01 (mon.0) 930 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:12.448216+0000 mon.vm01 (mon.0) 931 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:12.449002+0000 mon.vm01 (mon.0) 932 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:24:13.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:12.449536+0000 mon.vm01 (mon.0) 933 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:24:13.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:12.452838+0000 mon.vm01 (mon.0) 934 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:13.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:12 vm01 bash[28222]: audit 2026-04-16T19:24:12.454221+0000 mon.vm01 (mon.0) 935 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:24:13.354 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:24:14.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:13 vm04 bash[34817]: audit 2026-04-16T19:24:13.354279+0000 mon.vm01 (mon.0) 936 : audit [DBG] from='client.? 192.168.123.101:0/97234212' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:14.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:13 vm01 bash[28222]: audit 2026-04-16T19:24:13.354279+0000 mon.vm01 (mon.0) 936 : audit [DBG] from='client.? 192.168.123.101:0/97234212' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:15.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:14 vm04 bash[34817]: audit 2026-04-16T19:24:12.923273+0000 mgr.vm01.nwhpas (mgr.14227) 214 : audit [DBG] from='client.14840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:15.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:14 vm04 bash[34817]: audit 2026-04-16T19:24:13.124506+0000 mgr.vm01.nwhpas (mgr.14227) 215 : audit [DBG] from='client.14844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:15.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:14 vm04 bash[34817]: cluster 2026-04-16T19:24:13.286136+0000 mgr.vm01.nwhpas (mgr.14227) 216 : cluster [DBG] pgmap v101: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:24:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:14 vm01 bash[28222]: audit 2026-04-16T19:24:12.923273+0000 mgr.vm01.nwhpas (mgr.14227) 214 : audit [DBG] from='client.14840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:14 vm01 bash[28222]: audit 2026-04-16T19:24:13.124506+0000 mgr.vm01.nwhpas (mgr.14227) 215 : audit [DBG] from='client.14844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:15.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:14 vm01 bash[28222]: cluster 2026-04-16T19:24:13.286136+0000 mgr.vm01.nwhpas (mgr.14227) 216 : cluster [DBG] pgmap v101: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:24:16.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:16 vm01 bash[28222]: cluster 2026-04-16T19:24:15.286628+0000 mgr.vm01.nwhpas (mgr.14227) 217 : cluster [DBG] pgmap v102: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:24:17.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:16 vm04 bash[34817]: cluster 2026-04-16T19:24:15.286628+0000 mgr.vm01.nwhpas (mgr.14227) 217 : cluster [DBG] pgmap v102: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:24:18.574 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:18.779 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:18.779 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (23s) 6s ago 75s 94.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:18.779 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (73s) 6s ago 73s 99.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:24:18.779 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (74s) 6s ago 74s 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:18.779 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (76s) 6s ago 76s 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:18.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:18 vm01 bash[28222]: cluster 2026-04-16T19:24:17.287004+0000 mgr.vm01.nwhpas (mgr.14227) 218 : cluster [DBG] pgmap v103: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:24:19.018 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:24:19.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:18 vm04 bash[34817]: cluster 2026-04-16T19:24:17.287004+0000 mgr.vm01.nwhpas (mgr.14227) 218 : cluster [DBG] pgmap v103: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:24:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:19 vm04 bash[34817]: audit 2026-04-16T19:24:18.557097+0000 mgr.vm01.nwhpas (mgr.14227) 219 : audit [DBG] from='client.14852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:19 vm04 bash[34817]: audit 2026-04-16T19:24:18.776493+0000 mgr.vm01.nwhpas (mgr.14227) 220 : audit [DBG] from='client.14856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:20.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:19 vm04 bash[34817]: audit 2026-04-16T19:24:19.017664+0000 mon.vm01 (mon.0) 937 : audit [DBG] from='client.? 192.168.123.101:0/1039659827' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:19 vm01 bash[28222]: audit 2026-04-16T19:24:18.557097+0000 mgr.vm01.nwhpas (mgr.14227) 219 : audit [DBG] from='client.14852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:19 vm01 bash[28222]: audit 2026-04-16T19:24:18.776493+0000 mgr.vm01.nwhpas (mgr.14227) 220 : audit [DBG] from='client.14856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:20.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:19 vm01 bash[28222]: audit 2026-04-16T19:24:19.017664+0000 mon.vm01 (mon.0) 937 : audit [DBG] from='client.? 192.168.123.101:0/1039659827' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:21.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:20 vm04 bash[34817]: cluster 2026-04-16T19:24:19.287456+0000 mgr.vm01.nwhpas (mgr.14227) 221 : cluster [DBG] pgmap v104: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-16T19:24:21.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:20 vm01 bash[28222]: cluster 2026-04-16T19:24:19.287456+0000 mgr.vm01.nwhpas (mgr.14227) 221 : cluster [DBG] pgmap v104: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-16T19:24:23.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:22 vm01 bash[28222]: cluster 2026-04-16T19:24:21.287875+0000 mgr.vm01.nwhpas (mgr.14227) 222 : cluster [DBG] pgmap v105: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-16T19:24:23.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:22 vm01 bash[28222]: audit 2026-04-16T19:24:22.564077+0000 mon.vm01 (mon.0) 938 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:22 vm04 bash[34817]: cluster 2026-04-16T19:24:21.287875+0000 mgr.vm01.nwhpas (mgr.14227) 222 : cluster [DBG] pgmap v105: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-16T19:24:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:22 vm04 bash[34817]: audit 2026-04-16T19:24:22.564077+0000 mon.vm01 (mon.0) 938 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:23 vm04 bash[34817]: audit 2026-04-16T19:24:22.918545+0000 mon.vm01 (mon.0) 939 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:23 vm04 bash[34817]: audit 2026-04-16T19:24:22.923201+0000 mon.vm01 (mon.0) 940 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:23 vm04 bash[34817]: audit 2026-04-16T19:24:22.924822+0000 mon.vm01 (mon.0) 941 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:24:24.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:23 vm04 bash[34817]: cluster 2026-04-16T19:24:23.288303+0000 mgr.vm01.nwhpas (mgr.14227) 223 : cluster [DBG] pgmap v106: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-16T19:24:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:23 vm01 bash[28222]: audit 2026-04-16T19:24:22.918545+0000 mon.vm01 (mon.0) 939 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:23 vm01 bash[28222]: audit 2026-04-16T19:24:22.923201+0000 mon.vm01 (mon.0) 940 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:23 vm01 bash[28222]: audit 2026-04-16T19:24:22.924822+0000 mon.vm01 (mon.0) 941 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:24:24.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:23 vm01 bash[28222]: cluster 2026-04-16T19:24:23.288303+0000 mgr.vm01.nwhpas (mgr.14227) 223 : cluster [DBG] pgmap v106: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-16T19:24:24.225 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:24.424 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:24.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (28s) 11s ago 81s 94.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:24.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (78s) 11s ago 79s 99.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b6cd22f5d84
2026-04-16T19:24:24.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (80s) 12s ago 80s 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:24.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (82s) 12s ago 82s 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:24.693 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:24:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:24 vm04 bash[34817]: audit 2026-04-16T19:24:24.204847+0000 mgr.vm01.nwhpas (mgr.14227) 224 : audit [DBG] from='client.14864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:24 vm04 bash[34817]: audit 2026-04-16T19:24:24.420557+0000 mgr.vm01.nwhpas (mgr.14227) 225 : audit [DBG] from='client.24541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:25.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:24 vm04 bash[34817]: audit 2026-04-16T19:24:24.693102+0000 mon.vm01 (mon.0) 942 : audit [DBG] from='client.? 192.168.123.101:0/2180494749' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:25.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:24 vm01 bash[28222]: audit 2026-04-16T19:24:24.204847+0000 mgr.vm01.nwhpas (mgr.14227) 224 : audit [DBG] from='client.14864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:25.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:24 vm01 bash[28222]: audit 2026-04-16T19:24:24.420557+0000 mgr.vm01.nwhpas (mgr.14227) 225 : audit [DBG] from='client.24541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:25.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:24 vm01 bash[28222]: audit 2026-04-16T19:24:24.693102+0000 mon.vm01 (mon.0) 942 : audit [DBG] from='client.? 192.168.123.101:0/2180494749' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:26.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:25 vm04 bash[34817]: cluster 2026-04-16T19:24:25.288733+0000 mgr.vm01.nwhpas (mgr.14227) 226 : cluster [DBG] pgmap v107: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:24:26.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:25 vm01 bash[28222]: cluster 2026-04-16T19:24:25.288733+0000 mgr.vm01.nwhpas (mgr.14227) 226 : cluster [DBG] pgmap v107: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:24:29.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: cluster 2026-04-16T19:24:27.289156+0000 mgr.vm01.nwhpas (mgr.14227) 227 : cluster [DBG] pgmap v108: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:24:29.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:27.759042+0000 mon.vm01 (mon.0) 943 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:27.764206+0000 mon.vm01 (mon.0) 944 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:28.333182+0000 mon.vm01 (mon.0) 945 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:28.340171+0000 mon.vm01 (mon.0) 946 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:28.341231+0000 mon.vm01 (mon.0) 947 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:24:29.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:28.342086+0000 mon.vm01 (mon.0) 948 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:24:29.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:28.348511+0000 mon.vm01 (mon.0) 949 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:28 vm04 bash[34817]: audit 2026-04-16T19:24:28.350655+0000 mon.vm01 (mon.0) 950 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: cluster 2026-04-16T19:24:27.289156+0000 mgr.vm01.nwhpas (mgr.14227) 227 : cluster [DBG] pgmap v108: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:27.759042+0000 mon.vm01 (mon.0) 943 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:27.764206+0000 mon.vm01 (mon.0) 944 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:28.333182+0000 mon.vm01 (mon.0) 945 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:28.340171+0000 mon.vm01 (mon.0) 946 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:28.341231+0000 mon.vm01 (mon.0) 947 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:28.342086+0000 mon.vm01 (mon.0) 948 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:28.348511+0000 mon.vm01 (mon.0) 949 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:29.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:28 vm01 bash[28222]: audit 2026-04-16T19:24:28.350655+0000 mon.vm01 (mon.0) 950 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:24:29.919 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:30.114 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:30.114 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (34s) 1s ago 86s 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:30.114 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 1s ago 84s - -
2026-04-16T19:24:30.114 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (85s) 2s ago 85s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:30.114 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (87s) 2s ago 87s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:30.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:29 vm04 bash[34817]: cluster 2026-04-16T19:24:28.343719+0000 mgr.vm01.nwhpas (mgr.14227) 228 : cluster [DBG] pgmap v109: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 185 B/s rd, 370 B/s wr, 0 op/s
2026-04-16T19:24:30.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:29 vm04 bash[34817]: cluster 2026-04-16T19:24:29.338141+0000 mon.vm01 (mon.0) 951 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-16T19:24:30.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:29 vm01 bash[28222]: cluster 2026-04-16T19:24:28.343719+0000 mgr.vm01.nwhpas (mgr.14227) 228 : cluster [DBG] pgmap v109: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 185 B/s rd, 370 B/s wr, 0 op/s
2026-04-16T19:24:30.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:29 vm01 bash[28222]: cluster 2026-04-16T19:24:29.338141+0000 mon.vm01 (mon.0) 951 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-16T19:24:30.350 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:24:30.350 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:24:30.350 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:24:31.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:30 vm04 bash[34817]: audit 2026-04-16T19:24:30.349836+0000 mon.vm01 (mon.0) 952 : audit [DBG] from='client.? 192.168.123.101:0/3380325917' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:31.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:30 vm01 bash[28222]: audit 2026-04-16T19:24:30.349836+0000 mon.vm01 (mon.0) 952 : audit [DBG] from='client.? 192.168.123.101:0/3380325917' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:32.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:31 vm04 bash[34817]: audit 2026-04-16T19:24:29.901777+0000 mgr.vm01.nwhpas (mgr.14227) 229 : audit [DBG] from='client.14876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:32.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:31 vm04 bash[34817]: audit 2026-04-16T19:24:30.110633+0000 mgr.vm01.nwhpas (mgr.14227) 230 : audit [DBG] from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:32.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:31 vm04 bash[34817]: cluster 2026-04-16T19:24:30.344162+0000 mgr.vm01.nwhpas (mgr.14227) 231 : cluster [DBG] pgmap v110: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:31 vm01 bash[28222]: audit 2026-04-16T19:24:29.901777+0000 mgr.vm01.nwhpas (mgr.14227) 229 : audit [DBG] from='client.14876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:31 vm01 bash[28222]: audit 2026-04-16T19:24:30.110633+0000 mgr.vm01.nwhpas (mgr.14227) 230 : audit [DBG] from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:32.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:31 vm01 bash[28222]: cluster 2026-04-16T19:24:30.344162+0000 mgr.vm01.nwhpas (mgr.14227) 231 : cluster [DBG] pgmap v110: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:34.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:33 vm04 bash[34817]: cluster 2026-04-16T19:24:32.344610+0000 mgr.vm01.nwhpas (mgr.14227) 232 : cluster [DBG] pgmap v111: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:34.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:33 vm01 bash[28222]: cluster 2026-04-16T19:24:32.344610+0000 mgr.vm01.nwhpas (mgr.14227) 232 : cluster [DBG] pgmap v111: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:35.567 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:35.758 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:35.758 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (40s) 7s ago 92s 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:35.758 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 7s ago 90s - -
2026-04-16T19:24:35.759 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (91s) 8s ago 91s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:35.759 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (93s) 8s ago 93s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:35.993 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:24:35.993 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:24:35.993 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:24:36.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:35 vm04 bash[34817]: cluster 2026-04-16T19:24:34.345050+0000 mgr.vm01.nwhpas (mgr.14227) 233 : cluster [DBG] pgmap v112: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:36.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:35 vm01 bash[28222]: cluster 2026-04-16T19:24:34.345050+0000 mgr.vm01.nwhpas (mgr.14227) 233 : cluster [DBG] pgmap v112: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:36.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:36 vm01 bash[28222]: audit 2026-04-16T19:24:35.549805+0000 mgr.vm01.nwhpas (mgr.14227) 234 : audit [DBG] from='client.14888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:36.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:36 vm01 bash[28222]: audit 2026-04-16T19:24:35.755593+0000 mgr.vm01.nwhpas (mgr.14227) 235 : audit [DBG] from='client.14892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:36.958 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:36 vm01 bash[28222]: audit 2026-04-16T19:24:35.992551+0000 mon.vm01 (mon.0) 953 : audit [DBG] from='client.? 192.168.123.101:0/967294542' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:37.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:36 vm04 bash[34817]: audit 2026-04-16T19:24:35.549805+0000 mgr.vm01.nwhpas (mgr.14227) 234 : audit [DBG] from='client.14888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:37.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:36 vm04 bash[34817]: audit 2026-04-16T19:24:35.755593+0000 mgr.vm01.nwhpas (mgr.14227) 235 : audit [DBG] from='client.14892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:37.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:36 vm04 bash[34817]: audit 2026-04-16T19:24:35.992551+0000 mon.vm01 (mon.0) 953 : audit [DBG] from='client.? 192.168.123.101:0/967294542' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:38.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:37 vm04 bash[34817]: cluster 2026-04-16T19:24:36.345525+0000 mgr.vm01.nwhpas (mgr.14227) 236 : cluster [DBG] pgmap v113: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:38.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:37 vm04 bash[34817]: audit 2026-04-16T19:24:37.568975+0000 mon.vm01 (mon.0) 954 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:38.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:37 vm04 bash[34817]: audit 2026-04-16T19:24:37.569429+0000 mon.vm01 (mon.0) 955 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:38.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:37 vm01 bash[28222]: cluster 2026-04-16T19:24:36.345525+0000 mgr.vm01.nwhpas (mgr.14227) 236 : cluster [DBG] pgmap v113: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:38.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:37 vm01 bash[28222]: audit 2026-04-16T19:24:37.568975+0000 mon.vm01 (mon.0) 954 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:24:38.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:37 vm01 bash[28222]: audit 2026-04-16T19:24:37.569429+0000 mon.vm01 (mon.0) 955 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:40.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:39 vm04 bash[34817]: cluster 2026-04-16T19:24:38.345901+0000 mgr.vm01.nwhpas (mgr.14227) 237 : cluster [DBG] pgmap v114: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:40.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:39 vm01 bash[28222]: cluster 2026-04-16T19:24:38.345901+0000 mgr.vm01.nwhpas (mgr.14227) 237 : cluster [DBG] pgmap v114: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 B/s rd, 185 B/s wr, 0 op/s
2026-04-16T19:24:41.199 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:41.379 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:41.379 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (45s) 13s ago 98s 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:41.379 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 13s ago 95s - -
2026-04-16T19:24:41.379 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (96s) 13s ago 97s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:41.379 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (98s) 13s ago 99s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:41.608 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:24:41.608 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:24:41.608 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:24:42.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:41 vm04 bash[34817]: cluster 2026-04-16T19:24:40.346356+0000 mgr.vm01.nwhpas (mgr.14227) 238 : cluster [DBG] pgmap v115: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:42.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:41 vm04 bash[34817]: audit 2026-04-16T19:24:41.607428+0000 mon.vm04 (mon.1) 36 : audit [DBG] from='client.? 192.168.123.101:0/3658216980' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:41 vm01 bash[28222]: cluster 2026-04-16T19:24:40.346356+0000 mgr.vm01.nwhpas (mgr.14227) 238 : cluster [DBG] pgmap v115: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:42.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:41 vm01 bash[28222]: audit 2026-04-16T19:24:41.607428+0000 mon.vm04 (mon.1) 36 : audit [DBG] from='client.? 192.168.123.101:0/3658216980' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:43.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:42 vm04 bash[34817]: audit 2026-04-16T19:24:41.182033+0000 mgr.vm01.nwhpas (mgr.14227) 239 : audit [DBG] from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:43.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:42 vm04 bash[34817]: audit 2026-04-16T19:24:41.375794+0000 mgr.vm01.nwhpas (mgr.14227) 240 : audit [DBG] from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:42 vm01 bash[28222]: audit 2026-04-16T19:24:41.182033+0000 mgr.vm01.nwhpas (mgr.14227) 239 : audit [DBG] from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:43.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:42 vm01 bash[28222]: audit 2026-04-16T19:24:41.375794+0000 mgr.vm01.nwhpas (mgr.14227) 240 : audit [DBG] from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:44.206 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:43 vm04 bash[34817]: cluster 2026-04-16T19:24:42.346718+0000 mgr.vm01.nwhpas (mgr.14227) 241 : cluster [DBG] pgmap v116: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:44.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:43 vm01 bash[28222]: cluster 2026-04-16T19:24:42.346718+0000 mgr.vm01.nwhpas (mgr.14227) 241 : cluster [DBG] pgmap v116: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:46.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:45 vm04 bash[34817]: cluster 2026-04-16T19:24:44.347149+0000 mgr.vm01.nwhpas (mgr.14227) 242 : cluster [DBG] pgmap v117: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:46.208 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:45 vm01 bash[28222]: cluster 2026-04-16T19:24:44.347149+0000 mgr.vm01.nwhpas (mgr.14227) 242 : cluster [DBG] pgmap v117: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:46.846 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:47.041 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:47.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (51s) 18s ago 103s 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:47.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 18s ago 101s - -
2026-04-16T19:24:47.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (102s) 19s ago 102s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:47.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (104s) 19s ago 104s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:47.307 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:24:47.308 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:24:47.308 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:24:48.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:47 vm04 bash[34817]: cluster 2026-04-16T19:24:46.347764+0000 mgr.vm01.nwhpas (mgr.14227) 243 : cluster [DBG] pgmap v118: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:48.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:47 vm04 bash[34817]: audit 2026-04-16T19:24:46.821972+0000 mgr.vm01.nwhpas (mgr.14227) 244 : audit [DBG] from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:48.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:47 vm04 bash[34817]: audit 2026-04-16T19:24:47.306360+0000 mon.vm01 (mon.0) 956 : audit [DBG] from='client.? 192.168.123.101:0/3571602630' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:48.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:47 vm01 bash[28222]: cluster 2026-04-16T19:24:46.347764+0000 mgr.vm01.nwhpas (mgr.14227) 243 : cluster [DBG] pgmap v118: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:48.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:47 vm01 bash[28222]: audit 2026-04-16T19:24:46.821972+0000 mgr.vm01.nwhpas (mgr.14227) 244 : audit [DBG] from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:48.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:47 vm01 bash[28222]: audit 2026-04-16T19:24:47.306360+0000 mon.vm01 (mon.0) 956 : audit [DBG] from='client.? 192.168.123.101:0/3571602630' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:49.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:48 vm04 bash[34817]: audit 2026-04-16T19:24:47.037130+0000 mgr.vm01.nwhpas (mgr.14227) 245 : audit [DBG] from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:49.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:48 vm01 bash[28222]: audit 2026-04-16T19:24:47.037130+0000 mgr.vm01.nwhpas (mgr.14227) 245 : audit [DBG] from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:50.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:49 vm04 bash[34817]: cluster 2026-04-16T19:24:48.348148+0000 mgr.vm01.nwhpas (mgr.14227) 246 : cluster [DBG] pgmap v119: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:50.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:49 vm01 bash[28222]: cluster 2026-04-16T19:24:48.348148+0000 mgr.vm01.nwhpas (mgr.14227) 246 : cluster [DBG] pgmap v119: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:52.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:51 vm04 bash[34817]: cluster 2026-04-16T19:24:50.348606+0000 mgr.vm01.nwhpas (mgr.14227) 247 : cluster [DBG] pgmap v120: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:52.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:51 vm01 bash[28222]: cluster 2026-04-16T19:24:50.348606+0000 mgr.vm01.nwhpas (mgr.14227) 247 : cluster [DBG] pgmap v120: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:52.521 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:52.726 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:52.726 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (57s) 24s ago 109s 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:52.726 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 24s ago 107s - -
2026-04-16T19:24:52.726 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (108s) 24s ago 108s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:52.726 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (110s) 24s ago 110s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:52.952 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:24:52.952 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:24:52.952 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:24:53.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:52 vm04 bash[34817]: audit 2026-04-16T19:24:52.564556+0000 mon.vm01 (mon.0) 957 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:53.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:52 vm01 bash[28222]: audit 2026-04-16T19:24:52.564556+0000 mon.vm01 (mon.0) 957 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:24:54.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:53 vm04 bash[34817]: cluster 2026-04-16T19:24:52.349009+0000 mgr.vm01.nwhpas (mgr.14227) 248 : cluster [DBG] pgmap v121: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:54.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:53 vm04 bash[34817]: audit 2026-04-16T19:24:52.503449+0000 mgr.vm01.nwhpas (mgr.14227) 249 : audit [DBG] from='client.14924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:54.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:53 vm04 bash[34817]: audit 2026-04-16T19:24:52.721903+0000 mgr.vm01.nwhpas (mgr.14227) 250 : audit [DBG] from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:54.207 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:53 vm04 bash[34817]: audit 2026-04-16T19:24:52.950952+0000 mon.vm01 (mon.0) 958 : audit [DBG] from='client.? 192.168.123.101:0/1971286039' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:54.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:53 vm01 bash[28222]: cluster 2026-04-16T19:24:52.349009+0000 mgr.vm01.nwhpas (mgr.14227) 248 : cluster [DBG] pgmap v121: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:54.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:53 vm01 bash[28222]: audit 2026-04-16T19:24:52.503449+0000 mgr.vm01.nwhpas (mgr.14227) 249 : audit [DBG] from='client.14924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:54.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:53 vm01 bash[28222]: audit 2026-04-16T19:24:52.721903+0000 mgr.vm01.nwhpas (mgr.14227) 250 : audit [DBG] from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:54.209 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:53 vm01 bash[28222]: audit 2026-04-16T19:24:52.950952+0000 mon.vm01 (mon.0) 958 : audit [DBG] from='client.? 192.168.123.101:0/1971286039' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:55.457 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:55 vm04 bash[34817]: cluster 2026-04-16T19:24:54.349430+0000 mgr.vm01.nwhpas (mgr.14227) 251 : cluster [DBG] pgmap v122: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:55.459 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:55 vm01 bash[28222]: cluster 2026-04-16T19:24:54.349430+0000 mgr.vm01.nwhpas (mgr.14227) 251 : cluster [DBG] pgmap v122: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:24:57.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:57 vm04 bash[34817]: cluster 2026-04-16T19:24:56.349832+0000 mgr.vm01.nwhpas (mgr.14227) 252 : cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:57.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:57 vm01 bash[28222]: cluster 2026-04-16T19:24:56.349832+0000 mgr.vm01.nwhpas (mgr.14227) 252 : cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:24:58.170 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:24:58.369 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:24:58.369 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (62s) 30s ago 115s 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:24:58.369 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 30s ago 112s - -
2026-04-16T19:24:58.369 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (113s) 30s ago 114s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:24:58.369 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (115s) 30s ago 116s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:24:58.644 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:24:58.644 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:24:58.645 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:24:59.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:59 vm04 bash[34817]: audit 2026-04-16T19:24:58.150910+0000 mgr.vm01.nwhpas (mgr.14227) 253 : audit [DBG] from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:59.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:59 vm04 bash[34817]: cluster 2026-04-16T19:24:58.350324+0000 mgr.vm01.nwhpas (mgr.14227) 254 : cluster [DBG] pgmap v124: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:24:59.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:59 vm04 bash[34817]: audit 2026-04-16T19:24:58.364001+0000 mgr.vm01.nwhpas (mgr.14227) 255 : audit [DBG] from='client.14940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:59.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:24:59 vm04 bash[34817]: audit 2026-04-16T19:24:58.642768+0000 mon.vm01 (mon.0) 959 : audit [DBG] from='client.? 192.168.123.101:0/3765421671' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:24:59.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:59 vm01 bash[28222]: audit 2026-04-16T19:24:58.150910+0000 mgr.vm01.nwhpas (mgr.14227) 253 : audit [DBG] from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:59.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:59 vm01 bash[28222]: cluster 2026-04-16T19:24:58.350324+0000 mgr.vm01.nwhpas (mgr.14227) 254 : cluster [DBG] pgmap v124: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:24:59.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:59 vm01 bash[28222]: audit 2026-04-16T19:24:58.364001+0000 mgr.vm01.nwhpas (mgr.14227) 255 : audit [DBG] from='client.14940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:24:59.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:24:59 vm01 bash[28222]: audit 2026-04-16T19:24:58.642768+0000 mon.vm01 (mon.0) 959 : audit [DBG] from='client.? 192.168.123.101:0/3765421671' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:01.707 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:01 vm04 bash[34817]: cluster 2026-04-16T19:25:00.350844+0000 mgr.vm01.nwhpas (mgr.14227) 256 : cluster [DBG] pgmap v125: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:01.709 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:01 vm01 bash[28222]: cluster 2026-04-16T19:25:00.350844+0000 mgr.vm01.nwhpas (mgr.14227) 256 : cluster [DBG] pgmap v125: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:03.872 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:25:03.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:03 vm04 bash[34817]: cluster 2026-04-16T19:25:02.351375+0000 mgr.vm01.nwhpas (mgr.14227) 257 : cluster [DBG] pgmap v126: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:03.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:03 vm01 bash[28222]: cluster 2026-04-16T19:25:02.351375+0000 mgr.vm01.nwhpas (mgr.14227) 257 : cluster [DBG] pgmap v126: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:04.071 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:25:04.071 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (68s) 35s ago 2m 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:25:04.071 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 35s ago 118s - -
2026-04-16T19:25:04.071 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (119s) 36s ago 119s 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:25:04.071 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 36s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:25:04.309 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:25:04.310 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:25:04.310 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:25:04.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:04 vm04 bash[34817]: audit 2026-04-16T19:25:04.307684+0000 mon.vm01 (mon.0) 960 : audit [DBG] from='client.? 192.168.123.101:0/1561990684' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:04.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:04 vm01 bash[28222]: audit 2026-04-16T19:25:04.307684+0000 mon.vm01 (mon.0) 960 : audit [DBG] from='client.? 192.168.123.101:0/1561990684' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:05.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:05 vm04 bash[34817]: audit 2026-04-16T19:25:03.852528+0000 mgr.vm01.nwhpas (mgr.14227) 258 : audit [DBG] from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:05.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:05 vm04 bash[34817]: audit 2026-04-16T19:25:04.066747+0000 mgr.vm01.nwhpas (mgr.14227) 259 : audit [DBG] from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:05.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:05 vm04 bash[34817]: cluster 2026-04-16T19:25:04.351844+0000 mgr.vm01.nwhpas (mgr.14227) 260 : cluster [DBG] pgmap v127: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:05.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:05 vm01 bash[28222]: audit 2026-04-16T19:25:03.852528+0000 mgr.vm01.nwhpas (mgr.14227) 258 : audit [DBG] from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:05.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:05 vm01 bash[28222]: audit 2026-04-16T19:25:04.066747+0000 mgr.vm01.nwhpas (mgr.14227) 259 : audit [DBG] from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:05.959 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:05 vm01 bash[28222]: cluster 2026-04-16T19:25:04.351844+0000 mgr.vm01.nwhpas (mgr.14227) 260 : cluster [DBG] pgmap v127: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:07.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:07 vm04 bash[34817]: cluster 2026-04-16T19:25:06.352321+0000 mgr.vm01.nwhpas (mgr.14227) 261 : cluster [DBG] pgmap v128: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:07.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:07 vm04 bash[34817]: audit 2026-04-16T19:25:07.564966+0000 mon.vm01 (mon.0) 961 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:25:07.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:07 vm01 bash[28222]: cluster 2026-04-16T19:25:06.352321+0000 mgr.vm01.nwhpas (mgr.14227) 261 : cluster [DBG] pgmap v128: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:25:07.960
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:07 vm01 bash[28222]: audit 2026-04-16T19:25:07.564966+0000 mon.vm01 (mon.0) 961 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:25:07.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:07 vm01 bash[28222]: audit 2026-04-16T19:25:07.564966+0000 mon.vm01 (mon.0) 961 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:25:09.521 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:25:09.705 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:25:09.705 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (74s) 41s ago 2m 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:25:09.705 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 41s ago 2m - - 2026-04-16T19:25:09.705 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 41s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:25:09.705 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 41s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:25:09.939 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:25:09.939 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:25:09.939 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:25:09.957 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:09 vm04 bash[34817]: cluster 2026-04-16T19:25:08.352665+0000 mgr.vm01.nwhpas (mgr.14227) 262 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:09 vm04 bash[34817]: cluster 2026-04-16T19:25:08.352665+0000 mgr.vm01.nwhpas (mgr.14227) 262 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:09.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:09 vm01 bash[28222]: cluster 2026-04-16T19:25:08.352665+0000 mgr.vm01.nwhpas (mgr.14227) 262 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:09.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:09 vm01 bash[28222]: cluster 2026-04-16T19:25:08.352665+0000 mgr.vm01.nwhpas (mgr.14227) 262 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:10 vm04 bash[34817]: audit 2026-04-16T19:25:09.502298+0000 mgr.vm01.nwhpas (mgr.14227) 263 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:10 vm04 bash[34817]: audit 2026-04-16T19:25:09.502298+0000 mgr.vm01.nwhpas (mgr.14227) 263 : 
audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:10 vm04 bash[34817]: audit 2026-04-16T19:25:09.699667+0000 mgr.vm01.nwhpas (mgr.14227) 264 : audit [DBG] from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:10 vm04 bash[34817]: audit 2026-04-16T19:25:09.699667+0000 mgr.vm01.nwhpas (mgr.14227) 264 : audit [DBG] from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:10 vm04 bash[34817]: audit 2026-04-16T19:25:09.936786+0000 mon.vm01 (mon.0) 962 : audit [DBG] from='client.? 192.168.123.101:0/2123747421' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:10 vm04 bash[34817]: audit 2026-04-16T19:25:09.936786+0000 mon.vm01 (mon.0) 962 : audit [DBG] from='client.? 192.168.123.101:0/2123747421' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:10.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:10 vm01 bash[28222]: audit 2026-04-16T19:25:09.502298+0000 mgr.vm01.nwhpas (mgr.14227) 263 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:10 vm01 bash[28222]: audit 2026-04-16T19:25:09.502298+0000 mgr.vm01.nwhpas (mgr.14227) 263 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:10 vm01 bash[28222]: audit 2026-04-16T19:25:09.699667+0000 mgr.vm01.nwhpas (mgr.14227) 264 : audit [DBG] from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:10 vm01 bash[28222]: audit 2026-04-16T19:25:09.699667+0000 mgr.vm01.nwhpas (mgr.14227) 264 : audit [DBG] from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:10.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:10 vm01 bash[28222]: audit 2026-04-16T19:25:09.936786+0000 mon.vm01 (mon.0) 962 : audit [DBG] from='client.? 192.168.123.101:0/2123747421' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:10.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:10 vm01 bash[28222]: audit 2026-04-16T19:25:09.936786+0000 mon.vm01 (mon.0) 962 : audit [DBG] from='client.? 
192.168.123.101:0/2123747421' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:11.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:11 vm04 bash[34817]: cluster 2026-04-16T19:25:10.353080+0000 mgr.vm01.nwhpas (mgr.14227) 265 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:11.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:11 vm04 bash[34817]: cluster 2026-04-16T19:25:10.353080+0000 mgr.vm01.nwhpas (mgr.14227) 265 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:11.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:11 vm01 bash[28222]: cluster 2026-04-16T19:25:10.353080+0000 mgr.vm01.nwhpas (mgr.14227) 265 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:11.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:11 vm01 bash[28222]: cluster 2026-04-16T19:25:10.353080+0000 mgr.vm01.nwhpas (mgr.14227) 265 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:14.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:13 vm04 bash[34817]: cluster 2026-04-16T19:25:12.353457+0000 mgr.vm01.nwhpas (mgr.14227) 266 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:14.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:13 vm04 bash[34817]: cluster 2026-04-16T19:25:12.353457+0000 mgr.vm01.nwhpas (mgr.14227) 266 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:14.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:13 vm01 bash[28222]: cluster 2026-04-16T19:25:12.353457+0000 mgr.vm01.nwhpas (mgr.14227) 266 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:14.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:13 vm01 bash[28222]: cluster 2026-04-16T19:25:12.353457+0000 mgr.vm01.nwhpas (mgr.14227) 266 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:15.150 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:25:15.335 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:25:15.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (79s) 47s ago 2m 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:25:15.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 47s ago 2m - - 2026-04-16T19:25:15.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 47s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:25:15.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 47s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:25:15.565 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:25:15.565 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:25:15.565 INFO:teuthology.orchestra.run.vm01.stdout: daemon 
rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:25:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:15 vm04 bash[34817]: cluster 2026-04-16T19:25:14.353845+0000 mgr.vm01.nwhpas (mgr.14227) 267 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:15 vm04 bash[34817]: cluster 2026-04-16T19:25:14.353845+0000 mgr.vm01.nwhpas (mgr.14227) 267 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:15 vm04 bash[34817]: audit 2026-04-16T19:25:15.562769+0000 mon.vm01 (mon.0) 963 : audit [DBG] from='client.? 192.168.123.101:0/3964413649' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:15 vm04 bash[34817]: audit 2026-04-16T19:25:15.562769+0000 mon.vm01 (mon.0) 963 : audit [DBG] from='client.? 192.168.123.101:0/3964413649' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:16.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:15 vm01 bash[28222]: cluster 2026-04-16T19:25:14.353845+0000 mgr.vm01.nwhpas (mgr.14227) 267 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:16.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:15 vm01 bash[28222]: cluster 2026-04-16T19:25:14.353845+0000 mgr.vm01.nwhpas (mgr.14227) 267 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:16.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:15 vm01 bash[28222]: audit 2026-04-16T19:25:15.562769+0000 mon.vm01 (mon.0) 963 : audit [DBG] from='client.? 192.168.123.101:0/3964413649' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:16.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:15 vm01 bash[28222]: audit 2026-04-16T19:25:15.562769+0000 mon.vm01 (mon.0) 963 : audit [DBG] from='client.? 
192.168.123.101:0/3964413649' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:16.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:16 vm01 bash[28222]: audit 2026-04-16T19:25:15.131133+0000 mgr.vm01.nwhpas (mgr.14227) 268 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:16.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:16 vm01 bash[28222]: audit 2026-04-16T19:25:15.131133+0000 mgr.vm01.nwhpas (mgr.14227) 268 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:16.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:16 vm01 bash[28222]: audit 2026-04-16T19:25:15.330257+0000 mgr.vm01.nwhpas (mgr.14227) 269 : audit [DBG] from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:16.960 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:16 vm01 bash[28222]: audit 2026-04-16T19:25:15.330257+0000 mgr.vm01.nwhpas (mgr.14227) 269 : audit [DBG] from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:16 vm04 bash[34817]: audit 2026-04-16T19:25:15.131133+0000 mgr.vm01.nwhpas (mgr.14227) 268 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:16 vm04 bash[34817]: audit 2026-04-16T19:25:15.131133+0000 mgr.vm01.nwhpas (mgr.14227) 268 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:16 vm04 bash[34817]: audit 2026-04-16T19:25:15.330257+0000 mgr.vm01.nwhpas (mgr.14227) 269 : audit [DBG] from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:16 vm04 bash[34817]: audit 2026-04-16T19:25:15.330257+0000 mgr.vm01.nwhpas (mgr.14227) 269 : audit [DBG] from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:17 vm04 bash[34817]: cluster 2026-04-16T19:25:16.354243+0000 mgr.vm01.nwhpas (mgr.14227) 270 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:17 vm04 bash[34817]: cluster 2026-04-16T19:25:16.354243+0000 mgr.vm01.nwhpas (mgr.14227) 270 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:18.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:17 vm01 bash[28222]: cluster 2026-04-16T19:25:16.354243+0000 mgr.vm01.nwhpas (mgr.14227) 270 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:18.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:17 
vm01 bash[28222]: cluster 2026-04-16T19:25:16.354243+0000 mgr.vm01.nwhpas (mgr.14227) 270 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:20.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:19 vm04 bash[34817]: cluster 2026-04-16T19:25:18.354734+0000 mgr.vm01.nwhpas (mgr.14227) 271 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:20.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:19 vm04 bash[34817]: cluster 2026-04-16T19:25:18.354734+0000 mgr.vm01.nwhpas (mgr.14227) 271 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:20.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:19 vm01 bash[28222]: cluster 2026-04-16T19:25:18.354734+0000 mgr.vm01.nwhpas (mgr.14227) 271 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:20.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:19 vm01 bash[28222]: cluster 2026-04-16T19:25:18.354734+0000 mgr.vm01.nwhpas (mgr.14227) 271 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:20.765 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:25:20.951 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:25:20.951 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (85s) 52s ago 2m 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:25:20.951 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 52s ago 2m - - 2026-04-16T19:25:20.951 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 53s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:25:20.951 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 53s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:25:21.174 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:25:21.174 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:25:21.174 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:25:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:21 vm04 bash[34817]: cluster 2026-04-16T19:25:20.355147+0000 mgr.vm01.nwhpas (mgr.14227) 272 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:21 vm04 bash[34817]: cluster 2026-04-16T19:25:20.355147+0000 mgr.vm01.nwhpas (mgr.14227) 272 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:21 vm04 bash[34817]: audit 2026-04-16T19:25:20.746699+0000 mgr.vm01.nwhpas (mgr.14227) 273 : audit [DBG] from='client.14984 
-' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:21 vm04 bash[34817]: audit 2026-04-16T19:25:20.746699+0000 mgr.vm01.nwhpas (mgr.14227) 273 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:21 vm04 bash[34817]: audit 2026-04-16T19:25:21.171424+0000 mon.vm01 (mon.0) 964 : audit [DBG] from='client.? 192.168.123.101:0/276990674' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:21 vm04 bash[34817]: audit 2026-04-16T19:25:21.171424+0000 mon.vm01 (mon.0) 964 : audit [DBG] from='client.? 192.168.123.101:0/276990674' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:22.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:21 vm01 bash[28222]: cluster 2026-04-16T19:25:20.355147+0000 mgr.vm01.nwhpas (mgr.14227) 272 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:22.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:21 vm01 bash[28222]: cluster 2026-04-16T19:25:20.355147+0000 mgr.vm01.nwhpas (mgr.14227) 272 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:22.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:21 vm01 bash[28222]: audit 2026-04-16T19:25:20.746699+0000 mgr.vm01.nwhpas (mgr.14227) 273 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:22.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:21 vm01 bash[28222]: audit 2026-04-16T19:25:20.746699+0000 mgr.vm01.nwhpas (mgr.14227) 273 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:22.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:21 vm01 bash[28222]: audit 2026-04-16T19:25:21.171424+0000 mon.vm01 (mon.0) 964 : audit [DBG] from='client.? 192.168.123.101:0/276990674' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:22.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:21 vm01 bash[28222]: audit 2026-04-16T19:25:21.171424+0000 mon.vm01 (mon.0) 964 : audit [DBG] from='client.? 
192.168.123.101:0/276990674' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:22 vm04 bash[34817]: audit 2026-04-16T19:25:20.945439+0000 mgr.vm01.nwhpas (mgr.14227) 274 : audit [DBG] from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:22 vm04 bash[34817]: audit 2026-04-16T19:25:20.945439+0000 mgr.vm01.nwhpas (mgr.14227) 274 : audit [DBG] from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:22 vm04 bash[34817]: audit 2026-04-16T19:25:22.565003+0000 mon.vm01 (mon.0) 965 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:25:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:22 vm04 bash[34817]: audit 2026-04-16T19:25:22.565003+0000 mon.vm01 (mon.0) 965 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:25:23.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:22 vm01 bash[28222]: audit 2026-04-16T19:25:20.945439+0000 mgr.vm01.nwhpas (mgr.14227) 274 : audit [DBG] from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:23.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:22 vm01 bash[28222]: audit 2026-04-16T19:25:20.945439+0000 mgr.vm01.nwhpas (mgr.14227) 274 : audit [DBG] from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:23.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:22 vm01 bash[28222]: audit 2026-04-16T19:25:22.565003+0000 mon.vm01 (mon.0) 965 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:25:23.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:22 vm01 bash[28222]: audit 2026-04-16T19:25:22.565003+0000 mon.vm01 (mon.0) 965 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:25:24.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:23 vm04 bash[34817]: cluster 2026-04-16T19:25:22.355578+0000 mgr.vm01.nwhpas (mgr.14227) 275 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:24.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:23 vm04 bash[34817]: cluster 2026-04-16T19:25:22.355578+0000 mgr.vm01.nwhpas (mgr.14227) 275 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:24.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:23 vm01 bash[28222]: cluster 2026-04-16T19:25:22.355578+0000 mgr.vm01.nwhpas (mgr.14227) 275 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 
2026-04-16T19:25:24.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:23 vm01 bash[28222]: cluster 2026-04-16T19:25:22.355578+0000 mgr.vm01.nwhpas (mgr.14227) 275 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:26.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:25 vm04 bash[34817]: cluster 2026-04-16T19:25:24.355975+0000 mgr.vm01.nwhpas (mgr.14227) 276 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:26.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:25 vm04 bash[34817]: cluster 2026-04-16T19:25:24.355975+0000 mgr.vm01.nwhpas (mgr.14227) 276 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:26.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:25 vm01 bash[28222]: cluster 2026-04-16T19:25:24.355975+0000 mgr.vm01.nwhpas (mgr.14227) 276 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:26.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:25 vm01 bash[28222]: cluster 2026-04-16T19:25:24.355975+0000 mgr.vm01.nwhpas (mgr.14227) 276 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:25:26.374 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:25:26.551 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:25:26.551 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (90s) 58s ago 2m 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:25:26.551 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 58s ago 2m - - 2026-04-16T19:25:26.551 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 58s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:25:26.551 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 58s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:25:26.782 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:25:26.783 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:25:26.783 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:25:27.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:26 vm04 bash[34817]: audit 2026-04-16T19:25:26.780171+0000 mon.vm01 (mon.0) 966 : audit [DBG] from='client.? 192.168.123.101:0/1996621555' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:27.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:26 vm04 bash[34817]: audit 2026-04-16T19:25:26.780171+0000 mon.vm01 (mon.0) 966 : audit [DBG] from='client.? 
192.168.123.101:0/1996621555' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:27.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:26 vm01 bash[28222]: audit 2026-04-16T19:25:26.780171+0000 mon.vm01 (mon.0) 966 : audit [DBG] from='client.? 192.168.123.101:0/1996621555' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:27.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:26 vm01 bash[28222]: audit 2026-04-16T19:25:26.780171+0000 mon.vm01 (mon.0) 966 : audit [DBG] from='client.? 192.168.123.101:0/1996621555' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:27 vm04 bash[34817]: audit 2026-04-16T19:25:26.354515+0000 mgr.vm01.nwhpas (mgr.14227) 277 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:27 vm04 bash[34817]: audit 2026-04-16T19:25:26.354515+0000 mgr.vm01.nwhpas (mgr.14227) 277 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:27 vm04 bash[34817]: cluster 2026-04-16T19:25:26.356442+0000 mgr.vm01.nwhpas (mgr.14227) 278 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:27 vm04 bash[34817]: cluster 2026-04-16T19:25:26.356442+0000 mgr.vm01.nwhpas (mgr.14227) 278 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:27 vm04 bash[34817]: audit 2026-04-16T19:25:26.545606+0000 mgr.vm01.nwhpas (mgr.14227) 279 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:27 vm04 bash[34817]: audit 2026-04-16T19:25:26.545606+0000 mgr.vm01.nwhpas (mgr.14227) 279 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:27 vm01 bash[28222]: audit 2026-04-16T19:25:26.354515+0000 mgr.vm01.nwhpas (mgr.14227) 277 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:27 vm01 bash[28222]: audit 2026-04-16T19:25:26.354515+0000 mgr.vm01.nwhpas (mgr.14227) 277 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:27 vm01 bash[28222]: cluster 2026-04-16T19:25:26.356442+0000 mgr.vm01.nwhpas (mgr.14227) 278 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:28.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:27 vm01 bash[28222]: cluster 2026-04-16T19:25:26.356442+0000 mgr.vm01.nwhpas (mgr.14227) 278 : cluster [DBG] pgmap v138: 129 pgs: 129 
active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:25:28.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:27 vm01 bash[28222]: audit 2026-04-16T19:25:26.545606+0000 mgr.vm01.nwhpas (mgr.14227) 279 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:28.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:27 vm01 bash[28222]: audit 2026-04-16T19:25:26.545606+0000 mgr.vm01.nwhpas (mgr.14227) 279 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:29.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:28 vm04 bash[34817]: cluster 2026-04-16T19:25:28.356784+0000 mgr.vm01.nwhpas (mgr.14227) 280 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:29.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:28 vm04 bash[34817]: cluster 2026-04-16T19:25:28.356784+0000 mgr.vm01.nwhpas (mgr.14227) 280 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:29.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:28 vm04 bash[34817]: audit 2026-04-16T19:25:28.369058+0000 mon.vm01 (mon.0) 967 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:25:29.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:28 vm04 bash[34817]: audit 2026-04-16T19:25:28.369058+0000 mon.vm01 (mon.0) 967 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:25:29.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:28 vm01 bash[28222]: cluster 2026-04-16T19:25:28.356784+0000 mgr.vm01.nwhpas (mgr.14227) 280 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:29.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:28 vm01 bash[28222]: cluster 2026-04-16T19:25:28.356784+0000 mgr.vm01.nwhpas (mgr.14227) 280 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:29.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:28 vm01 bash[28222]: audit 2026-04-16T19:25:28.369058+0000 mon.vm01 (mon.0) 967 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:25:29.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:28 vm01 bash[28222]: audit 2026-04-16T19:25:28.369058+0000 mon.vm01 (mon.0) 967 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:25:31.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:31 vm04 bash[34817]: cluster 2026-04-16T19:25:30.357177+0000 mgr.vm01.nwhpas (mgr.14227) 281 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:31.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:31 vm04 
bash[34817]: cluster 2026-04-16T19:25:30.357177+0000 mgr.vm01.nwhpas (mgr.14227) 281 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:31.710 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:31 vm01 bash[28222]: cluster 2026-04-16T19:25:30.357177+0000 mgr.vm01.nwhpas (mgr.14227) 281 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:31.710 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:31 vm01 bash[28222]: cluster 2026-04-16T19:25:30.357177+0000 mgr.vm01.nwhpas (mgr.14227) 281 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:31.984 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:25:32.162 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:25:32.162 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (96s) 63s ago 2m 95.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:25:32.162 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 63s ago 2m - - 2026-04-16T19:25:32.162 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 64s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:25:32.162 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 64s ago 2m 101M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:25:32.389 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:25:32.389 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:25:32.389 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:25:32.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:32 vm04 bash[34817]: audit 2026-04-16T19:25:32.386549+0000 mon.vm01 (mon.0) 968 : audit [DBG] from='client.? 192.168.123.101:0/1828129588' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:32.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:32 vm04 bash[34817]: audit 2026-04-16T19:25:32.386549+0000 mon.vm01 (mon.0) 968 : audit [DBG] from='client.? 192.168.123.101:0/1828129588' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:32.710 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:32 vm01 bash[28222]: audit 2026-04-16T19:25:32.386549+0000 mon.vm01 (mon.0) 968 : audit [DBG] from='client.? 192.168.123.101:0/1828129588' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:32.710 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:32 vm01 bash[28222]: audit 2026-04-16T19:25:32.386549+0000 mon.vm01 (mon.0) 968 : audit [DBG] from='client.? 
192.168.123.101:0/1828129588' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:25:33.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:31.966461+0000 mgr.vm01.nwhpas (mgr.14227) 282 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:31.966461+0000 mgr.vm01.nwhpas (mgr.14227) 282 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:32.157295+0000 mgr.vm01.nwhpas (mgr.14227) 283 : audit [DBG] from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:32.157295+0000 mgr.vm01.nwhpas (mgr.14227) 283 : audit [DBG] from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: cluster 2026-04-16T19:25:32.357583+0000 mgr.vm01.nwhpas (mgr.14227) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: cluster 2026-04-16T19:25:32.357583+0000 mgr.vm01.nwhpas (mgr.14227) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:33.143805+0000 mon.vm01 (mon.0) 969 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:33.143805+0000 mon.vm01 (mon.0) 969 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:33.149193+0000 mon.vm01 (mon.0) 970 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:33 vm04 bash[34817]: audit 2026-04-16T19:25:33.149193+0000 mon.vm01 (mon.0) 970 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.710 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:31.966461+0000 mgr.vm01.nwhpas (mgr.14227) 282 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.710 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:31.966461+0000 mgr.vm01.nwhpas (mgr.14227) 282 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.711 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:32.157295+0000 mgr.vm01.nwhpas (mgr.14227) 283 : audit [DBG] from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:32.157295+0000 mgr.vm01.nwhpas (mgr.14227) 283 : audit [DBG] from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: cluster 2026-04-16T19:25:32.357583+0000 mgr.vm01.nwhpas (mgr.14227) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: cluster 2026-04-16T19:25:32.357583+0000 mgr.vm01.nwhpas (mgr.14227) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:33.143805+0000 mon.vm01 (mon.0) 969 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:33.143805+0000 mon.vm01 (mon.0) 969 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:33.149193+0000 mon.vm01 (mon.0) 970 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:33.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:33 vm01 bash[28222]: audit 2026-04-16T19:25:33.149193+0000 mon.vm01 (mon.0) 970 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:35.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:33.717152+0000 mon.vm01 (mon.0) 971 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:35.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:33.717152+0000 mon.vm01 (mon.0) 971 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:33.722580+0000 mon.vm01 (mon.0) 972 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:33.722580+0000 mon.vm01 (mon.0) 972 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:34.055507+0000 mon.vm01 (mon.0) 973 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 
19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:34.055507+0000 mon.vm01 (mon.0) 973 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:34.055973+0000 mon.vm01 (mon.0) 974 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:34.060746+0000 mon.vm01 (mon.0) 975 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:25:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:34 vm04 bash[34817]: audit 2026-04-16T19:25:34.061908+0000 mon.vm01 (mon.0) 976 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:25:35.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:34 vm01 bash[28222]: audit 2026-04-16T19:25:33.717152+0000 mon.vm01 (mon.0) 971 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:25:35.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:34 vm01 bash[28222]: audit 2026-04-16T19:25:33.722580+0000 mon.vm01 (mon.0) 972 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:25:35.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:34 vm01 bash[28222]: audit 2026-04-16T19:25:34.055507+0000 mon.vm01 (mon.0) 973 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:25:35.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:34 vm01 bash[28222]: audit 2026-04-16T19:25:34.055973+0000 mon.vm01 (mon.0) 974 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:25:35.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:34 vm01 bash[28222]: audit 2026-04-16T19:25:34.060746+0000 mon.vm01 (mon.0) 975 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:25:35.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:34 vm01 bash[28222]: audit 2026-04-16T19:25:34.061908+0000 mon.vm01 (mon.0) 976 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:25:36.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:35 vm04 bash[34817]: cluster 2026-04-16T19:25:34.056838+0000 mgr.vm01.nwhpas (mgr.14227) 285 : cluster [DBG] pgmap v142: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s
2026-04-16T19:25:36.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:35 vm04 bash[34817]: cluster 2026-04-16T19:25:34.056918+0000 mgr.vm01.nwhpas (mgr.14227) 286 : cluster [DBG] pgmap v143: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 105 B/s rd, 211 B/s wr, 0 op/s
2026-04-16T19:25:36.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:35 vm01 bash[28222]: cluster 2026-04-16T19:25:34.056838+0000 mgr.vm01.nwhpas (mgr.14227) 285 : cluster [DBG] pgmap v142: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s
2026-04-16T19:25:36.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:35 vm01 bash[28222]: cluster 2026-04-16T19:25:34.056918+0000 mgr.vm01.nwhpas (mgr.14227) 286 : cluster [DBG] pgmap v143: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 105 B/s rd, 211 B/s wr, 0 op/s
2026-04-16T19:25:37.596 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:25:37.793 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:25:37.794 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (102s) 4s ago 2m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:25:37.794 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 4s ago 2m - -
2026-04-16T19:25:37.794 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 4s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:25:37.794 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 4s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:25:38.023 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:25:38.024 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:25:38.024 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:25:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:37 vm04 bash[34817]: cluster 2026-04-16T19:25:36.057285+0000 mgr.vm01.nwhpas (mgr.14227) 287 : cluster [DBG] pgmap v144: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 105 B/s rd, 211 B/s wr, 0 op/s
2026-04-16T19:25:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:37 vm04 bash[34817]: audit 2026-04-16T19:25:37.565183+0000 mon.vm01 (mon.0) 977 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:25:38.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:37 vm01 bash[28222]: cluster 2026-04-16T19:25:36.057285+0000 mgr.vm01.nwhpas (mgr.14227) 287 : cluster [DBG] pgmap v144: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 105 B/s rd, 211 B/s wr, 0 op/s
2026-04-16T19:25:38.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:37 vm01 bash[28222]: audit 2026-04-16T19:25:37.565183+0000 mon.vm01 (mon.0) 977 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:25:39.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:38 vm04 bash[34817]: audit 2026-04-16T19:25:37.574014+0000 mgr.vm01.nwhpas (mgr.14227) 288 : audit [DBG] from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:39.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:38 vm04 bash[34817]: audit 2026-04-16T19:25:37.786633+0000 mgr.vm01.nwhpas (mgr.14227) 289 : audit [DBG] from='client.15024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:39.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:38 vm04 bash[34817]: audit 2026-04-16T19:25:38.020972+0000 mon.vm01 (mon.0) 978 : audit [DBG] from='client.? 192.168.123.101:0/589975940' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:39.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:38 vm01 bash[28222]: audit 2026-04-16T19:25:37.574014+0000 mgr.vm01.nwhpas (mgr.14227) 288 : audit [DBG] from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:39.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:38 vm01 bash[28222]: audit 2026-04-16T19:25:37.786633+0000 mgr.vm01.nwhpas (mgr.14227) 289 : audit [DBG] from='client.15024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:39.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:38 vm01 bash[28222]: audit 2026-04-16T19:25:38.020972+0000 mon.vm01 (mon.0) 978 : audit [DBG] from='client.? 192.168.123.101:0/589975940' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:40.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:39 vm04 bash[34817]: cluster 2026-04-16T19:25:38.057688+0000 mgr.vm01.nwhpas (mgr.14227) 290 : cluster [DBG] pgmap v145: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:40.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:39 vm01 bash[28222]: cluster 2026-04-16T19:25:38.057688+0000 mgr.vm01.nwhpas (mgr.14227) 290 : cluster [DBG] pgmap v145: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:42.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:41 vm04 bash[34817]: cluster 2026-04-16T19:25:40.058084+0000 mgr.vm01.nwhpas (mgr.14227) 291 : cluster [DBG] pgmap v146: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:42.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:41 vm01 bash[28222]: cluster 2026-04-16T19:25:40.058084+0000 mgr.vm01.nwhpas (mgr.14227) 291 : cluster [DBG] pgmap v146: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:43.229 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:25:43.424 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:25:43.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (107s) 9s ago 2m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:25:43.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 9s ago 2m - -
2026-04-16T19:25:43.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 10s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:25:43.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 10s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:25:43.650 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:25:43.650 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:25:43.650 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:25:44.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:43 vm04 bash[34817]: cluster 2026-04-16T19:25:42.058505+0000 mgr.vm01.nwhpas (mgr.14227) 292 : cluster [DBG] pgmap v147: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:44.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:43 vm04 bash[34817]: audit 2026-04-16T19:25:43.647460+0000 mon.vm01 (mon.0) 979 : audit [DBG] from='client.? 192.168.123.101:0/1635013915' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:44.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:43 vm01 bash[28222]: cluster 2026-04-16T19:25:42.058505+0000 mgr.vm01.nwhpas (mgr.14227) 292 : cluster [DBG] pgmap v147: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:44.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:43 vm01 bash[28222]: audit 2026-04-16T19:25:43.647460+0000 mon.vm01 (mon.0) 979 : audit [DBG] from='client.? 192.168.123.101:0/1635013915' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:45.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:44 vm04 bash[34817]: audit 2026-04-16T19:25:43.209815+0000 mgr.vm01.nwhpas (mgr.14227) 293 : audit [DBG] from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:45.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:44 vm04 bash[34817]: audit 2026-04-16T19:25:43.416727+0000 mgr.vm01.nwhpas (mgr.14227) 294 : audit [DBG] from='client.15036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:45.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:44 vm01 bash[28222]: audit 2026-04-16T19:25:43.209815+0000 mgr.vm01.nwhpas (mgr.14227) 293 : audit [DBG] from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:45.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:44 vm01 bash[28222]: audit 2026-04-16T19:25:43.416727+0000 mgr.vm01.nwhpas (mgr.14227) 294 : audit [DBG] from='client.15036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:46.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:45 vm04 bash[34817]: cluster 2026-04-16T19:25:44.059044+0000 mgr.vm01.nwhpas (mgr.14227) 295 : cluster [DBG] pgmap v148: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-16T19:25:46.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:45 vm01 bash[28222]: cluster 2026-04-16T19:25:44.059044+0000 mgr.vm01.nwhpas (mgr.14227) 295 : cluster [DBG] pgmap v148: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-16T19:25:48.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:47 vm04 bash[34817]: cluster 2026-04-16T19:25:46.059457+0000 mgr.vm01.nwhpas (mgr.14227) 296 : cluster [DBG] pgmap v149: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:48.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:47 vm01 bash[28222]: cluster 2026-04-16T19:25:46.059457+0000 mgr.vm01.nwhpas (mgr.14227) 296 : cluster [DBG] pgmap v149: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:48.878 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:25:49.075 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:25:49.075 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (113s) 15s ago 2m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:25:49.075 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 15s ago 2m - -
2026-04-16T19:25:49.075 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 15s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:25:49.075 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 15s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:25:49.333 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:25:49.333 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:25:49.333 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:25:50.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:49 vm04 bash[34817]: cluster 2026-04-16T19:25:48.059923+0000 mgr.vm01.nwhpas (mgr.14227) 297 : cluster [DBG] pgmap v150: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:50.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:49 vm04 bash[34817]: audit 2026-04-16T19:25:48.856782+0000 mgr.vm01.nwhpas (mgr.14227) 298 : audit [DBG] from='client.15044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:50.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:49 vm04 bash[34817]: audit 2026-04-16T19:25:49.330449+0000 mon.vm01 (mon.0) 980 : audit [DBG] from='client.? 192.168.123.101:0/1738109173' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:50.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:49 vm01 bash[28222]: cluster 2026-04-16T19:25:48.059923+0000 mgr.vm01.nwhpas (mgr.14227) 297 : cluster [DBG] pgmap v150: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:50.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:49 vm01 bash[28222]: audit 2026-04-16T19:25:48.856782+0000 mgr.vm01.nwhpas (mgr.14227) 298 : audit [DBG] from='client.15044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:50.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:49 vm01 bash[28222]: audit 2026-04-16T19:25:49.330449+0000 mon.vm01 (mon.0) 980 : audit [DBG] from='client.? 192.168.123.101:0/1738109173' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:51.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:50 vm04 bash[34817]: audit 2026-04-16T19:25:49.068682+0000 mgr.vm01.nwhpas (mgr.14227) 299 : audit [DBG] from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:51.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:50 vm01 bash[28222]: audit 2026-04-16T19:25:49.068682+0000 mgr.vm01.nwhpas (mgr.14227) 299 : audit [DBG] from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:52.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:51 vm04 bash[34817]: cluster 2026-04-16T19:25:50.060399+0000 mgr.vm01.nwhpas (mgr.14227) 300 : cluster [DBG] pgmap v151: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:52.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:51 vm01 bash[28222]: cluster 2026-04-16T19:25:50.060399+0000 mgr.vm01.nwhpas (mgr.14227) 300 : cluster [DBG] pgmap v151: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:53.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:52 vm04 bash[34817]: audit 2026-04-16T19:25:52.565242+0000 mon.vm01 (mon.0) 981 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:25:53.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:52 vm01 bash[28222]: audit 2026-04-16T19:25:52.565242+0000 mon.vm01 (mon.0) 981 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:25:54.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:53 vm04 bash[34817]: cluster 2026-04-16T19:25:52.060783+0000 mgr.vm01.nwhpas (mgr.14227) 301 : cluster [DBG] pgmap v152: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:54.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:53 vm01 bash[28222]: cluster 2026-04-16T19:25:52.060783+0000 mgr.vm01.nwhpas (mgr.14227) 301 : cluster [DBG] pgmap v152: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:54.553 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:25:54.739 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:25:54.739 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (119s) 21s ago 2m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:25:54.739 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 21s ago 2m - -
2026-04-16T19:25:54.739 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 21s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:25:54.739 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 21s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:25:54.985 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:25:54.985 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:25:54.985 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:25:56.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:55 vm04 bash[34817]: cluster 2026-04-16T19:25:54.061188+0000 mgr.vm01.nwhpas (mgr.14227) 302 : cluster [DBG] pgmap v153: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:56.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:55 vm04 bash[34817]: audit 2026-04-16T19:25:54.532767+0000 mgr.vm01.nwhpas (mgr.14227) 303 : audit [DBG] from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:56.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:55 vm04 bash[34817]: audit 2026-04-16T19:25:54.732601+0000 mgr.vm01.nwhpas (mgr.14227) 304 : audit [DBG] from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:56.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:55 vm04 bash[34817]: audit 2026-04-16T19:25:54.981794+0000 mon.vm01 (mon.0) 982 : audit [DBG] from='client.? 192.168.123.101:0/520516198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:56.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:55 vm01 bash[28222]: cluster 2026-04-16T19:25:54.061188+0000 mgr.vm01.nwhpas (mgr.14227) 302 : cluster [DBG] pgmap v153: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:25:56.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:55 vm01 bash[28222]: audit 2026-04-16T19:25:54.532767+0000 mgr.vm01.nwhpas (mgr.14227) 303 : audit [DBG] from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:56.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:55 vm01 bash[28222]: audit 2026-04-16T19:25:54.732601+0000 mgr.vm01.nwhpas (mgr.14227) 304 : audit [DBG] from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:25:56.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:55 vm01 bash[28222]: audit 2026-04-16T19:25:54.981794+0000 mon.vm01 (mon.0) 982 : audit [DBG] from='client.? 192.168.123.101:0/520516198' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:25:58.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:57 vm04 bash[34817]: cluster 2026-04-16T19:25:56.061581+0000 mgr.vm01.nwhpas (mgr.14227) 305 : cluster [DBG] pgmap v154: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:25:58.210 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:57 vm01 bash[28222]: cluster 2026-04-16T19:25:56.061581+0000 mgr.vm01.nwhpas (mgr.14227) 305 : cluster [DBG] pgmap v154: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:00.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:25:59 vm04 bash[34817]: cluster 2026-04-16T19:25:58.061945+0000 mgr.vm01.nwhpas (mgr.14227) 306 : cluster [DBG] pgmap v155: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:00.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:25:59 vm01 bash[28222]: cluster 2026-04-16T19:25:58.061945+0000 mgr.vm01.nwhpas (mgr.14227) 306 : cluster [DBG] pgmap v155: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:00.215 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:00.405 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:00.405 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 26s ago 2m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:00.405 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 26s ago 2m - -
2026-04-16T19:26:00.405 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 27s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:00.405 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (2m) 27s ago 2m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:00.658 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:00.658 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:00.658 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:01.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:00 vm04 bash[34817]: audit 2026-04-16T19:26:00.654636+0000 mon.vm01 (mon.0) 983 : audit [DBG] from='client.? 192.168.123.101:0/225029467' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:00 vm01 bash[28222]: audit 2026-04-16T19:26:00.654636+0000 mon.vm01 (mon.0) 983 : audit [DBG] from='client.? 192.168.123.101:0/225029467' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:01 vm04 bash[34817]: cluster 2026-04-16T19:26:00.062380+0000 mgr.vm01.nwhpas (mgr.14227) 307 : cluster [DBG] pgmap v156: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:01 vm04 bash[34817]: audit 2026-04-16T19:26:00.194120+0000 mgr.vm01.nwhpas (mgr.14227) 308 : audit [DBG] from='client.15068 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:01 vm04 bash[34817]: audit 2026-04-16T19:26:00.398890+0000 mgr.vm01.nwhpas (mgr.14227) 309 : audit [DBG] from='client.15072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:02.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:01 vm01 bash[28222]: cluster 2026-04-16T19:26:00.062380+0000 mgr.vm01.nwhpas (mgr.14227) 307 : cluster [DBG] pgmap v156: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:02.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:01 vm01 bash[28222]: audit 2026-04-16T19:26:00.194120+0000 mgr.vm01.nwhpas (mgr.14227) 308 : audit [DBG] from='client.15068 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:02.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:01 vm01 bash[28222]: audit 2026-04-16T19:26:00.398890+0000 mgr.vm01.nwhpas (mgr.14227) 309 : audit [DBG] from='client.15072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:03.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:02 vm04 bash[34817]: cluster 2026-04-16T19:26:02.062799+0000 mgr.vm01.nwhpas (mgr.14227) 310 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:03.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:02 vm01 bash[28222]: cluster 2026-04-16T19:26:02.062799+0000 mgr.vm01.nwhpas (mgr.14227) 310 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:05.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:05 vm04 bash[34817]: cluster 2026-04-16T19:26:04.063347+0000 mgr.vm01.nwhpas (mgr.14227) 311 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:05.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:05 vm01 bash[28222]: cluster 2026-04-16T19:26:04.063347+0000 mgr.vm01.nwhpas (mgr.14227) 311 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:05.877 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:06.059 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:06.059 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 32s ago 3m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:06.059 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 32s ago 3m - -
2026-04-16T19:26:06.059 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 32s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:06.059 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 32s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:06.301 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:06.301 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:06.301 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:06.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:06 vm04 bash[34817]: audit 2026-04-16T19:26:05.857075+0000 mgr.vm01.nwhpas (mgr.14227) 312 : audit [DBG] from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:06.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:06 vm01 bash[28222]: audit 2026-04-16T19:26:05.857075+0000 mgr.vm01.nwhpas (mgr.14227) 312 : audit [DBG] from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:07.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:07 vm04 bash[34817]: audit 2026-04-16T19:26:06.053216+0000 mgr.vm01.nwhpas (mgr.14227) 313 : audit [DBG] from='client.15084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:07.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:07 vm04 bash[34817]: cluster 2026-04-16T19:26:06.063786+0000 mgr.vm01.nwhpas (mgr.14227) 314 : cluster [DBG] pgmap v159: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:07.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:07 vm04 bash[34817]: audit 2026-04-16T19:26:06.297969+0000 mon.vm04 (mon.1) 37 : audit [DBG] from='client.? 192.168.123.101:0/3802376384' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:07.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:07 vm01 bash[28222]: audit 2026-04-16T19:26:06.053216+0000 mgr.vm01.nwhpas (mgr.14227) 313 : audit [DBG] from='client.15084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:07.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:07 vm01 bash[28222]: cluster 2026-04-16T19:26:06.063786+0000 mgr.vm01.nwhpas (mgr.14227) 314 : cluster [DBG] pgmap v159: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:07.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:07 vm01 bash[28222]: audit 2026-04-16T19:26:06.297969+0000 mon.vm04 (mon.1) 37 : audit [DBG] from='client.? 192.168.123.101:0/3802376384' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:08.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:08 vm04 bash[34817]: audit 2026-04-16T19:26:07.565598+0000 mon.vm01 (mon.0) 984 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:26:08.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:08 vm01 bash[28222]: audit 2026-04-16T19:26:07.565598+0000 mon.vm01 (mon.0) 984 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:26:09.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:09 vm04 bash[34817]: cluster 2026-04-16T19:26:08.064223+0000 mgr.vm01.nwhpas (mgr.14227) 315 : cluster [DBG] pgmap v160: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:09.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:09 vm01 bash[28222]: cluster 2026-04-16T19:26:08.064223+0000 mgr.vm01.nwhpas (mgr.14227) 315 : cluster [DBG] pgmap v160: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:11.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:11 vm04 bash[34817]: cluster 2026-04-16T19:26:10.064689+0000 mgr.vm01.nwhpas (mgr.14227) 316 : cluster [DBG] pgmap v161: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:11.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:11 vm01 bash[28222]: cluster 2026-04-16T19:26:10.064689+0000 mgr.vm01.nwhpas (mgr.14227) 316 : cluster [DBG] pgmap v161: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:11.583 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:11.792 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:11.792 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 38s ago 3m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:11.792 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 38s ago 3m - -
2026-04-16T19:26:11.792 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 38s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:11.792 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 38s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:12.038 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:12.038 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:12.038 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:12.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:12 vm04 bash[34817]: audit 2026-04-16T19:26:11.555636+0000 mgr.vm01.nwhpas (mgr.14227) 317 : audit [DBG] from='client.15090 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:12.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:12 vm04 bash[34817]: audit 2026-04-16T19:26:11.784294+0000 mgr.vm01.nwhpas (mgr.14227) 318 : audit [DBG] from='client.15094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:12.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:12 vm04 bash[34817]: audit 2026-04-16T19:26:12.035303+0000 mon.vm01 (mon.0) 985 : audit [DBG] from='client.? 192.168.123.101:0/2674395606' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:12.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:12 vm01 bash[28222]: audit 2026-04-16T19:26:11.555636+0000 mgr.vm01.nwhpas (mgr.14227) 317 : audit [DBG] from='client.15090 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:12.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:12 vm01 bash[28222]: audit 2026-04-16T19:26:11.784294+0000 mgr.vm01.nwhpas (mgr.14227) 318 : audit [DBG] from='client.15094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:12.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:12 vm01 bash[28222]: audit 2026-04-16T19:26:12.035303+0000 mon.vm01 (mon.0) 985 : audit [DBG] from='client.? 192.168.123.101:0/2674395606' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:12.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:12 vm01 bash[28222]: audit 2026-04-16T19:26:12.035303+0000 mon.vm01 (mon.0) 985 : audit [DBG] from='client.?
2026-04-16T19:26:13.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:13 vm04 bash[34817]: cluster 2026-04-16T19:26:12.065157+0000 mgr.vm01.nwhpas (mgr.14227) 319 : cluster [DBG] pgmap v162: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:13.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:13 vm01 bash[28222]: cluster 2026-04-16T19:26:12.065157+0000 mgr.vm01.nwhpas (mgr.14227) 319 : cluster [DBG] pgmap v162: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:15.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:15 vm04 bash[34817]: cluster 2026-04-16T19:26:14.065609+0000 mgr.vm01.nwhpas (mgr.14227) 320 : cluster [DBG] pgmap v163: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:15.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:15 vm01 bash[28222]: cluster 2026-04-16T19:26:14.065609+0000 mgr.vm01.nwhpas (mgr.14227) 320 : cluster [DBG] pgmap v163: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:17.248 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:17.552 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:17.552 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 43s ago 3m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:17.552 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 43s ago 3m - -
2026-04-16T19:26:17.552 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 44s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:17.552 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 44s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:17.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:17 vm04 bash[34817]: cluster 2026-04-16T19:26:16.066216+0000 mgr.vm01.nwhpas (mgr.14227) 321 : cluster [DBG] pgmap v164: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:17.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:17 vm01 bash[28222]: cluster 2026-04-16T19:26:16.066216+0000 mgr.vm01.nwhpas (mgr.14227) 321 : cluster [DBG] pgmap v164: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:17.805 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:17.805 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:17.805 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:18.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:18 vm04 bash[34817]: audit 2026-04-16T19:26:17.225717+0000 mgr.vm01.nwhpas (mgr.14227) 322 : audit [DBG] from='client.15102 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:18.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:18 vm04 bash[34817]: audit 2026-04-16T19:26:17.546709+0000 mgr.vm01.nwhpas (mgr.14227) 323 : audit [DBG] from='client.15106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:18.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:18 vm04 bash[34817]: audit 2026-04-16T19:26:17.801643+0000 mon.vm01 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.101:0/289163912' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:18.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:18 vm01 bash[28222]: audit 2026-04-16T19:26:17.225717+0000 mgr.vm01.nwhpas (mgr.14227) 322 : audit [DBG] from='client.15102 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:18.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:18 vm01 bash[28222]: audit 2026-04-16T19:26:17.546709+0000 mgr.vm01.nwhpas (mgr.14227) 323 : audit [DBG] from='client.15106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:18.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:18 vm01 bash[28222]: audit 2026-04-16T19:26:17.801643+0000 mon.vm01 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.101:0/289163912' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:19.708 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:19 vm04 bash[34817]: cluster 2026-04-16T19:26:18.066685+0000 mgr.vm01.nwhpas (mgr.14227) 324 : cluster [DBG] pgmap v165: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:19.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:19 vm01 bash[28222]: cluster 2026-04-16T19:26:18.066685+0000 mgr.vm01.nwhpas (mgr.14227) 324 : cluster [DBG] pgmap v165: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:21.711 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:21 vm01 bash[28222]: cluster 2026-04-16T19:26:20.067172+0000 mgr.vm01.nwhpas (mgr.14227) 325 : cluster [DBG] pgmap v166: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:21.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:21 vm04 bash[34817]: cluster 2026-04-16T19:26:20.067172+0000 mgr.vm01.nwhpas (mgr.14227) 325 : cluster [DBG] pgmap v166: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:23.024 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:23.210 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:23.210 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 49s ago 3m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:23.210 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 49s ago 3m - -
2026-04-16T19:26:23.210 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 50s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:23.210 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 50s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:23.459 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:23.459 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:23.459 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:23.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:23 vm04 bash[34817]: cluster 2026-04-16T19:26:22.067663+0000 mgr.vm01.nwhpas (mgr.14227) 326 : cluster [DBG] pgmap v167: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:23 vm04 bash[34817]: audit 2026-04-16T19:26:22.565809+0000 mon.vm01 (mon.0) 987 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:26:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:23 vm04 bash[34817]: audit 2026-04-16T19:26:23.456425+0000 mon.vm01 (mon.0) 988 : audit [DBG] from='client.? 192.168.123.101:0/2954400431' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:23.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:23 vm01 bash[28222]: cluster 2026-04-16T19:26:22.067663+0000 mgr.vm01.nwhpas (mgr.14227) 326 : cluster [DBG] pgmap v167: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:23.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:23 vm01 bash[28222]: audit 2026-04-16T19:26:22.565809+0000 mon.vm01 (mon.0) 987 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:26:23.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:23 vm01 bash[28222]: audit 2026-04-16T19:26:23.456425+0000 mon.vm01 (mon.0) 988 : audit [DBG] from='client.? 192.168.123.101:0/2954400431' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:24.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:24 vm04 bash[34817]: audit 2026-04-16T19:26:23.001613+0000 mgr.vm01.nwhpas (mgr.14227) 327 : audit [DBG] from='client.15114 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:24.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:24 vm04 bash[34817]: audit 2026-04-16T19:26:23.203568+0000 mgr.vm01.nwhpas (mgr.14227) 328 : audit [DBG] from='client.15118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:24.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:24 vm01 bash[28222]: audit 2026-04-16T19:26:23.001613+0000 mgr.vm01.nwhpas (mgr.14227) 327 : audit [DBG] from='client.15114 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:24.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:24 vm01 bash[28222]: audit 2026-04-16T19:26:23.203568+0000 mgr.vm01.nwhpas (mgr.14227) 328 : audit [DBG] from='client.15118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:25.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:25 vm04 bash[34817]: cluster 2026-04-16T19:26:24.068114+0000 mgr.vm01.nwhpas (mgr.14227) 329 : cluster [DBG] pgmap v168: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:25.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:25 vm01 bash[28222]: cluster 2026-04-16T19:26:24.068114+0000 mgr.vm01.nwhpas (mgr.14227) 329 : cluster [DBG] pgmap v168: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:26:27.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:27 vm04 bash[34817]: cluster 2026-04-16T19:26:26.068607+0000 mgr.vm01.nwhpas (mgr.14227) 330 : cluster [DBG] pgmap v169: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:27.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:27 vm01 bash[28222]: cluster 2026-04-16T19:26:26.068607+0000 mgr.vm01.nwhpas (mgr.14227) 330 : cluster [DBG] pgmap v169: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:28.665 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:28.855 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:28.855 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 55s ago 3m 100M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:28.855 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 55s ago 3m - -
2026-04-16T19:26:28.855 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 55s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:28.855 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 55s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:29.094 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:29.094 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:29.094 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:29 vm04 bash[34817]: cluster 2026-04-16T19:26:28.068989+0000 mgr.vm01.nwhpas (mgr.14227) 331 : cluster [DBG] pgmap v170: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:29 vm04 bash[34817]: audit 2026-04-16T19:26:28.645154+0000 mgr.vm01.nwhpas (mgr.14227) 332 : audit [DBG] from='client.15126 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:29 vm04 bash[34817]: audit 2026-04-16T19:26:28.848527+0000 mgr.vm01.nwhpas (mgr.14227) 333 : audit [DBG] from='client.15130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:29.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:29 vm04 bash[34817]: audit 2026-04-16T19:26:29.090916+0000 mon.vm01 (mon.0) 989 : audit [DBG] from='client.? 192.168.123.101:0/248127336' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:29.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:29 vm01 bash[28222]: cluster 2026-04-16T19:26:28.068989+0000 mgr.vm01.nwhpas (mgr.14227) 331 : cluster [DBG] pgmap v170: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:29.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:29 vm01 bash[28222]: audit 2026-04-16T19:26:28.645154+0000 mgr.vm01.nwhpas (mgr.14227) 332 : audit [DBG] from='client.15126 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:29.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:29 vm01 bash[28222]: audit 2026-04-16T19:26:28.848527+0000 mgr.vm01.nwhpas (mgr.14227) 333 : audit [DBG] from='client.15130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:29.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:29 vm01 bash[28222]: audit 2026-04-16T19:26:29.090916+0000 mon.vm01 (mon.0) 989 : audit [DBG] from='client.? 192.168.123.101:0/248127336' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:31.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:31 vm04 bash[34817]: cluster 2026-04-16T19:26:30.069394+0000 mgr.vm01.nwhpas (mgr.14227) 334 : cluster [DBG] pgmap v171: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:31.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:31 vm01 bash[28222]: cluster 2026-04-16T19:26:30.069394+0000 mgr.vm01.nwhpas (mgr.14227) 334 : cluster [DBG] pgmap v171: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:33.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:33 vm04 bash[34817]: cluster 2026-04-16T19:26:32.069797+0000 mgr.vm01.nwhpas (mgr.14227) 335 : cluster [DBG] pgmap v172: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:33.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:33 vm01 bash[28222]: cluster 2026-04-16T19:26:32.069797+0000 mgr.vm01.nwhpas (mgr.14227) 335 : cluster [DBG] pgmap v172: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:34.300 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:34.505 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:34.505 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 60s ago 3m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:34.505 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 60s ago 3m - -
2026-04-16T19:26:34.505 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 61s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:34.505 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 61s ago 3m 106M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:34.749 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:34.750 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:34.750 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:34.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:34 vm04 bash[34817]: audit 2026-04-16T19:26:34.075916+0000 mon.vm01 (mon.0) 990 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:26:34.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:34 vm01 bash[28222]: audit 2026-04-16T19:26:34.075916+0000 mon.vm01 (mon.0) 990 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:26:35.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:35 vm04 bash[34817]: cluster 2026-04-16T19:26:34.070308+0000 mgr.vm01.nwhpas (mgr.14227) 336 : cluster [DBG] pgmap v173: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:35.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:35 vm04 bash[34817]: audit 2026-04-16T19:26:34.280723+0000 mgr.vm01.nwhpas (mgr.14227) 337 : audit [DBG] from='client.15138 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:35.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:35 vm04 bash[34817]: audit 2026-04-16T19:26:34.497559+0000 mgr.vm01.nwhpas (mgr.14227) 338 : audit [DBG] from='client.15142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:35.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:35 vm04 bash[34817]: audit 2026-04-16T19:26:34.746282+0000 mon.vm01 (mon.0) 991 : audit [DBG] from='client.? 192.168.123.101:0/1889595568' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:35.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:35 vm01 bash[28222]: cluster 2026-04-16T19:26:34.070308+0000 mgr.vm01.nwhpas (mgr.14227) 336 : cluster [DBG] pgmap v173: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:35.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:35 vm01 bash[28222]: audit 2026-04-16T19:26:34.280723+0000 mgr.vm01.nwhpas (mgr.14227) 337 : audit [DBG] from='client.15138 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:35.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:35 vm01 bash[28222]: audit 2026-04-16T19:26:34.497559+0000 mgr.vm01.nwhpas (mgr.14227) 338 : audit [DBG] from='client.15142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:35.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:35 vm01 bash[28222]: audit 2026-04-16T19:26:34.746282+0000 mon.vm01 (mon.0) 991 : audit [DBG] from='client.? 192.168.123.101:0/1889595568' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:37.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:37 vm04 bash[34817]: cluster 2026-04-16T19:26:36.070715+0000 mgr.vm01.nwhpas (mgr.14227) 339 : cluster [DBG] pgmap v174: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:37.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:37 vm01 bash[28222]: cluster 2026-04-16T19:26:36.070715+0000 mgr.vm01.nwhpas (mgr.14227) 339 : cluster [DBG] pgmap v174: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:38.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:38 vm04 bash[34817]: audit 2026-04-16T19:26:37.565862+0000 mon.vm01 (mon.0) 992 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:26:38.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:38 vm01 bash[28222]: audit 2026-04-16T19:26:37.565862+0000 mon.vm01 (mon.0) 992 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:26:39.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:39 vm04 bash[34817]: cluster 2026-04-16T19:26:38.071074+0000 mgr.vm01.nwhpas (mgr.14227) 340 : cluster [DBG] pgmap v175: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:39.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:39 vm04 bash[34817]: audit 2026-04-16T19:26:38.893794+0000 mon.vm01 (mon.0) 993 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:39 vm04 bash[34817]: audit 2026-04-16T19:26:38.899159+0000 mon.vm01 (mon.0) 994 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:39 vm04 bash[34817]: audit 2026-04-16T19:26:39.443919+0000 mon.vm01 (mon.0) 995 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:39 vm04 bash[34817]: audit 2026-04-16T19:26:39.449947+0000 mon.vm01 (mon.0) 996 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:39 vm01 bash[28222]: cluster 2026-04-16T19:26:38.071074+0000 mgr.vm01.nwhpas (mgr.14227) 340 : cluster [DBG] pgmap v175: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:26:39.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:39 vm01 bash[28222]: audit 2026-04-16T19:26:38.893794+0000 mon.vm01 (mon.0) 993 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:39 vm01 bash[28222]: audit 2026-04-16T19:26:38.899159+0000 mon.vm01 (mon.0) 994 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:39 vm01 bash[28222]: audit 2026-04-16T19:26:39.443919+0000 mon.vm01 (mon.0) 995 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:39 vm01 bash[28222]: audit 2026-04-16T19:26:39.449947+0000 mon.vm01 (mon.0) 996 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:39.961 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:40.149 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:40.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 0s ago 3m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:40.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 0s ago 3m - -
2026-04-16T19:26:40.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 1s ago 3m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:40.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 1s ago 3m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:40.383 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:40.383 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:40.383 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:40.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: audit 2026-04-16T19:26:39.788690+0000 mon.vm01 (mon.0) 997 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:26:40.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: audit 2026-04-16T19:26:39.789296+0000 mon.vm01 (mon.0) 998 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:26:40.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: cluster 2026-04-16T19:26:39.790391+0000 mgr.vm01.nwhpas (mgr.14227) 341 : cluster [DBG] pgmap v176: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:40.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: cluster 2026-04-16T19:26:39.790520+0000 mgr.vm01.nwhpas (mgr.14227) 342 : cluster [DBG] pgmap v177: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:40.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: audit 2026-04-16T19:26:39.795191+0000 mon.vm01 (mon.0) 999 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:40.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: audit 2026-04-16T19:26:39.796629+0000 mon.vm01 (mon.0) 1000 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:26:40.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:40 vm04 bash[34817]: audit 2026-04-16T19:26:40.380324+0000 mon.vm01 (mon.0) 1001 : audit [DBG] from='client.? 192.168.123.101:0/3068619454' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: audit 2026-04-16T19:26:39.788690+0000 mon.vm01 (mon.0) 997 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: audit 2026-04-16T19:26:39.789296+0000 mon.vm01 (mon.0) 998 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: cluster 2026-04-16T19:26:39.790391+0000 mgr.vm01.nwhpas (mgr.14227) 341 : cluster [DBG] pgmap v176: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: cluster 2026-04-16T19:26:39.790520+0000 mgr.vm01.nwhpas (mgr.14227) 342 : cluster [DBG] pgmap v177: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: audit 2026-04-16T19:26:39.795191+0000 mon.vm01 (mon.0) 999 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: audit 2026-04-16T19:26:39.796629+0000 mon.vm01 (mon.0) 1000 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:26:40.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:40 vm01 bash[28222]: audit 2026-04-16T19:26:40.380324+0000 mon.vm01 (mon.0) 1001 : audit [DBG] from='client.? 192.168.123.101:0/3068619454' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:26:41.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:41 vm04 bash[34817]: audit 2026-04-16T19:26:39.939858+0000 mgr.vm01.nwhpas (mgr.14227) 343 : audit [DBG] from='client.24735 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:41.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:41 vm04 bash[34817]: audit 2026-04-16T19:26:40.142738+0000 mgr.vm01.nwhpas (mgr.14227) 344 : audit [DBG] from='client.15152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:41.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:41 vm01 bash[28222]: audit 2026-04-16T19:26:39.939858+0000 mgr.vm01.nwhpas (mgr.14227) 343 : audit [DBG] from='client.24735 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:41.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:41 vm01 bash[28222]: audit 2026-04-16T19:26:40.142738+0000 mgr.vm01.nwhpas (mgr.14227) 344 : audit [DBG] from='client.15152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:42.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:42 vm04 bash[34817]: cluster 2026-04-16T19:26:41.790967+0000 mgr.vm01.nwhpas (mgr.14227) 345 : cluster [DBG] pgmap v178: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:42.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:42 vm01 bash[28222]: cluster 2026-04-16T19:26:41.790967+0000 mgr.vm01.nwhpas (mgr.14227) 345 : cluster [DBG] pgmap v178: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:26:45.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:44 vm04 bash[34817]: cluster 2026-04-16T19:26:43.791376+0000 mgr.vm01.nwhpas (mgr.14227) 346 : cluster [DBG] pgmap v179: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s
2026-04-16T19:26:45.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:44 vm01 bash[28222]: cluster 2026-04-16T19:26:43.791376+0000 mgr.vm01.nwhpas (mgr.14227) 346 : cluster [DBG] pgmap v179: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s
2026-04-16T19:26:45.602 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:26:45.807 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:26:45.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 6s ago 3m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:26:45.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 6s ago 3m - -
2026-04-16T19:26:45.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 6s ago 3m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:26:45.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 6s ago 3m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:26:46.074 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:26:46.074 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:26:46.074 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: audit 2026-04-16T19:26:45.581562+0000 mgr.vm01.nwhpas (mgr.14227) 347 : audit [DBG] from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: audit 2026-04-16T19:26:45.581562+0000
mgr.vm01.nwhpas (mgr.14227) 347 : audit [DBG] from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: cluster 2026-04-16T19:26:45.791864+0000 mgr.vm01.nwhpas (mgr.14227) 348 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: cluster 2026-04-16T19:26:45.791864+0000 mgr.vm01.nwhpas (mgr.14227) 348 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: audit 2026-04-16T19:26:45.801081+0000 mgr.vm01.nwhpas (mgr.14227) 349 : audit [DBG] from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: audit 2026-04-16T19:26:45.801081+0000 mgr.vm01.nwhpas (mgr.14227) 349 : audit [DBG] from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: audit 2026-04-16T19:26:46.071027+0000 mon.vm01 (mon.0) 1002 : audit [DBG] from='client.? 192.168.123.101:0/2092021642' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:47.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:46 vm04 bash[34817]: audit 2026-04-16T19:26:46.071027+0000 mon.vm01 (mon.0) 1002 : audit [DBG] from='client.? 
192.168.123.101:0/2092021642' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: audit 2026-04-16T19:26:45.581562+0000 mgr.vm01.nwhpas (mgr.14227) 347 : audit [DBG] from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: audit 2026-04-16T19:26:45.581562+0000 mgr.vm01.nwhpas (mgr.14227) 347 : audit [DBG] from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: cluster 2026-04-16T19:26:45.791864+0000 mgr.vm01.nwhpas (mgr.14227) 348 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: cluster 2026-04-16T19:26:45.791864+0000 mgr.vm01.nwhpas (mgr.14227) 348 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: audit 2026-04-16T19:26:45.801081+0000 mgr.vm01.nwhpas (mgr.14227) 349 : audit [DBG] from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: audit 2026-04-16T19:26:45.801081+0000 mgr.vm01.nwhpas (mgr.14227) 349 : audit [DBG] from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: audit 2026-04-16T19:26:46.071027+0000 mon.vm01 (mon.0) 1002 : audit [DBG] from='client.? 192.168.123.101:0/2092021642' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:47.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:46 vm01 bash[28222]: audit 2026-04-16T19:26:46.071027+0000 mon.vm01 (mon.0) 1002 : audit [DBG] from='client.? 
192.168.123.101:0/2092021642' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:49.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:48 vm04 bash[34817]: cluster 2026-04-16T19:26:47.792184+0000 mgr.vm01.nwhpas (mgr.14227) 350 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:49.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:48 vm04 bash[34817]: cluster 2026-04-16T19:26:47.792184+0000 mgr.vm01.nwhpas (mgr.14227) 350 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:49.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:48 vm01 bash[28222]: cluster 2026-04-16T19:26:47.792184+0000 mgr.vm01.nwhpas (mgr.14227) 350 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:49.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:48 vm01 bash[28222]: cluster 2026-04-16T19:26:47.792184+0000 mgr.vm01.nwhpas (mgr.14227) 350 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 210 B/s rd, 421 B/s wr, 0 op/s 2026-04-16T19:26:51.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:50 vm04 bash[34817]: cluster 2026-04-16T19:26:49.792654+0000 mgr.vm01.nwhpas (mgr.14227) 351 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:26:51.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:50 vm04 bash[34817]: cluster 2026-04-16T19:26:49.792654+0000 mgr.vm01.nwhpas (mgr.14227) 351 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:26:51.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:50 vm01 bash[28222]: cluster 2026-04-16T19:26:49.792654+0000 mgr.vm01.nwhpas (mgr.14227) 351 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:26:51.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:50 vm01 bash[28222]: cluster 2026-04-16T19:26:49.792654+0000 mgr.vm01.nwhpas (mgr.14227) 351 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:26:51.303 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:26:51.515 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:26:51.515 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (2m) 12s ago 3m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:26:51.515 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 12s ago 3m - - 2026-04-16T19:26:51.515 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 12s ago 3m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:26:51.515 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 12s ago 3m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:26:51.782 
INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:26:51.782 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:26:51.782 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:26:52.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: audit 2026-04-16T19:26:51.280012+0000 mgr.vm01.nwhpas (mgr.14227) 352 : audit [DBG] from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: audit 2026-04-16T19:26:51.280012+0000 mgr.vm01.nwhpas (mgr.14227) 352 : audit [DBG] from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: audit 2026-04-16T19:26:51.508043+0000 mgr.vm01.nwhpas (mgr.14227) 353 : audit [DBG] from='client.15176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: audit 2026-04-16T19:26:51.508043+0000 mgr.vm01.nwhpas (mgr.14227) 353 : audit [DBG] from='client.15176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: audit 2026-04-16T19:26:51.778291+0000 mon.vm01 (mon.0) 1003 : audit [DBG] from='client.? 192.168.123.101:0/2399992564' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:52.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: audit 2026-04-16T19:26:51.778291+0000 mon.vm01 (mon.0) 1003 : audit [DBG] from='client.? 
192.168.123.101:0/2399992564' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:52.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: cluster 2026-04-16T19:26:51.793650+0000 mgr.vm01.nwhpas (mgr.14227) 354 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:52.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:51 vm04 bash[34817]: cluster 2026-04-16T19:26:51.793650+0000 mgr.vm01.nwhpas (mgr.14227) 354 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: audit 2026-04-16T19:26:51.280012+0000 mgr.vm01.nwhpas (mgr.14227) 352 : audit [DBG] from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: audit 2026-04-16T19:26:51.280012+0000 mgr.vm01.nwhpas (mgr.14227) 352 : audit [DBG] from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: audit 2026-04-16T19:26:51.508043+0000 mgr.vm01.nwhpas (mgr.14227) 353 : audit [DBG] from='client.15176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: audit 2026-04-16T19:26:51.508043+0000 mgr.vm01.nwhpas (mgr.14227) 353 : audit [DBG] from='client.15176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: audit 2026-04-16T19:26:51.778291+0000 mon.vm01 (mon.0) 1003 : audit [DBG] from='client.? 192.168.123.101:0/2399992564' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: audit 2026-04-16T19:26:51.778291+0000 mon.vm01 (mon.0) 1003 : audit [DBG] from='client.? 
192.168.123.101:0/2399992564' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: cluster 2026-04-16T19:26:51.793650+0000 mgr.vm01.nwhpas (mgr.14227) 354 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:52.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:51 vm01 bash[28222]: cluster 2026-04-16T19:26:51.793650+0000 mgr.vm01.nwhpas (mgr.14227) 354 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:53.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:52 vm04 bash[34817]: audit 2026-04-16T19:26:52.566321+0000 mon.vm01 (mon.0) 1004 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:26:53.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:52 vm04 bash[34817]: audit 2026-04-16T19:26:52.566321+0000 mon.vm01 (mon.0) 1004 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:26:53.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:52 vm01 bash[28222]: audit 2026-04-16T19:26:52.566321+0000 mon.vm01 (mon.0) 1004 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:26:53.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:52 vm01 bash[28222]: audit 2026-04-16T19:26:52.566321+0000 mon.vm01 (mon.0) 1004 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:26:54.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:53 vm04 bash[34817]: cluster 2026-04-16T19:26:53.794051+0000 mgr.vm01.nwhpas (mgr.14227) 355 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:54.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:53 vm04 bash[34817]: cluster 2026-04-16T19:26:53.794051+0000 mgr.vm01.nwhpas (mgr.14227) 355 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:54.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:53 vm01 bash[28222]: cluster 2026-04-16T19:26:53.794051+0000 mgr.vm01.nwhpas (mgr.14227) 355 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:54.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:53 vm01 bash[28222]: cluster 2026-04-16T19:26:53.794051+0000 mgr.vm01.nwhpas (mgr.14227) 355 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:26:57.034 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:26:57.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:56 vm04 bash[34817]: cluster 2026-04-16T19:26:55.794498+0000 mgr.vm01.nwhpas (mgr.14227) 356 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 587 KiB 
data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:26:57.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:56 vm04 bash[34817]: cluster 2026-04-16T19:26:55.794498+0000 mgr.vm01.nwhpas (mgr.14227) 356 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:26:57.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:56 vm01 bash[28222]: cluster 2026-04-16T19:26:55.794498+0000 mgr.vm01.nwhpas (mgr.14227) 356 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:26:57.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:56 vm01 bash[28222]: cluster 2026-04-16T19:26:55.794498+0000 mgr.vm01.nwhpas (mgr.14227) 356 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:26:57.220 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:26:57.220 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 17s ago 3m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:26:57.220 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 17s ago 3m - - 2026-04-16T19:26:57.220 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 18s ago 3m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:26:57.220 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (3m) 18s ago 3m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:26:57.477 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:26:57.477 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:26:57.477 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:26:58.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:57 vm04 bash[34817]: audit 2026-04-16T19:26:57.473402+0000 mon.vm01 (mon.0) 1005 : audit [DBG] from='client.? 192.168.123.101:0/648486942' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:58.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:57 vm04 bash[34817]: audit 2026-04-16T19:26:57.473402+0000 mon.vm01 (mon.0) 1005 : audit [DBG] from='client.? 192.168.123.101:0/648486942' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:58.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:57 vm01 bash[28222]: audit 2026-04-16T19:26:57.473402+0000 mon.vm01 (mon.0) 1005 : audit [DBG] from='client.? 192.168.123.101:0/648486942' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:58.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:57 vm01 bash[28222]: audit 2026-04-16T19:26:57.473402+0000 mon.vm01 (mon.0) 1005 : audit [DBG] from='client.? 
192.168.123.101:0/648486942' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:26:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:58 vm04 bash[34817]: audit 2026-04-16T19:26:57.013664+0000 mgr.vm01.nwhpas (mgr.14227) 357 : audit [DBG] from='client.24761 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:58 vm04 bash[34817]: audit 2026-04-16T19:26:57.013664+0000 mgr.vm01.nwhpas (mgr.14227) 357 : audit [DBG] from='client.24761 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:58 vm04 bash[34817]: audit 2026-04-16T19:26:57.213425+0000 mgr.vm01.nwhpas (mgr.14227) 358 : audit [DBG] from='client.15188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:58 vm04 bash[34817]: audit 2026-04-16T19:26:57.213425+0000 mgr.vm01.nwhpas (mgr.14227) 358 : audit [DBG] from='client.15188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:58 vm04 bash[34817]: cluster 2026-04-16T19:26:57.794869+0000 mgr.vm01.nwhpas (mgr.14227) 359 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:26:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:58 vm04 bash[34817]: cluster 2026-04-16T19:26:57.794869+0000 mgr.vm01.nwhpas (mgr.14227) 359 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:26:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:58 vm01 bash[28222]: audit 2026-04-16T19:26:57.013664+0000 mgr.vm01.nwhpas (mgr.14227) 357 : audit [DBG] from='client.24761 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:58 vm01 bash[28222]: audit 2026-04-16T19:26:57.013664+0000 mgr.vm01.nwhpas (mgr.14227) 357 : audit [DBG] from='client.24761 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:58 vm01 bash[28222]: audit 2026-04-16T19:26:57.213425+0000 mgr.vm01.nwhpas (mgr.14227) 358 : audit [DBG] from='client.15188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:58 vm01 bash[28222]: audit 2026-04-16T19:26:57.213425+0000 mgr.vm01.nwhpas (mgr.14227) 358 : audit [DBG] from='client.15188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:26:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:58 vm01 bash[28222]: cluster 2026-04-16T19:26:57.794869+0000 mgr.vm01.nwhpas (mgr.14227) 359 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:26:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:58 
vm01 bash[28222]: cluster 2026-04-16T19:26:57.794869+0000 mgr.vm01.nwhpas (mgr.14227) 359 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:00.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:59 vm04 bash[34817]: cluster 2026-04-16T19:26:59.795288+0000 mgr.vm01.nwhpas (mgr.14227) 360 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:00.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:26:59 vm04 bash[34817]: cluster 2026-04-16T19:26:59.795288+0000 mgr.vm01.nwhpas (mgr.14227) 360 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:00.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:59 vm01 bash[28222]: cluster 2026-04-16T19:26:59.795288+0000 mgr.vm01.nwhpas (mgr.14227) 360 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:00.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:26:59 vm01 bash[28222]: cluster 2026-04-16T19:26:59.795288+0000 mgr.vm01.nwhpas (mgr.14227) 360 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:02.700 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:27:02.905 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:27:02.905 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 23s ago 3m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:27:02.905 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 23s ago 3m - - 2026-04-16T19:27:02.905 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 24s ago 3m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:27:02.905 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 24s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:27:03.141 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:27:03.141 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:27:03.141 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:27:03.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:02 vm04 bash[34817]: cluster 2026-04-16T19:27:01.795815+0000 mgr.vm01.nwhpas (mgr.14227) 361 : cluster [DBG] pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:03.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:02 vm04 bash[34817]: cluster 2026-04-16T19:27:01.795815+0000 mgr.vm01.nwhpas (mgr.14227) 361 : cluster [DBG] pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:03.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:02 vm01 bash[28222]: cluster 2026-04-16T19:27:01.795815+0000 mgr.vm01.nwhpas (mgr.14227) 361 : cluster [DBG] pgmap v188: 129 pgs: 
129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:03.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:02 vm01 bash[28222]: cluster 2026-04-16T19:27:01.795815+0000 mgr.vm01.nwhpas (mgr.14227) 361 : cluster [DBG] pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:04.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:03 vm04 bash[34817]: audit 2026-04-16T19:27:02.672554+0000 mgr.vm01.nwhpas (mgr.14227) 362 : audit [DBG] from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:04.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:03 vm04 bash[34817]: audit 2026-04-16T19:27:02.672554+0000 mgr.vm01.nwhpas (mgr.14227) 362 : audit [DBG] from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:04.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:03 vm04 bash[34817]: audit 2026-04-16T19:27:03.137833+0000 mon.vm01 (mon.0) 1006 : audit [DBG] from='client.? 192.168.123.101:0/1449666945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:04.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:03 vm04 bash[34817]: audit 2026-04-16T19:27:03.137833+0000 mon.vm01 (mon.0) 1006 : audit [DBG] from='client.? 192.168.123.101:0/1449666945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:04.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:03 vm01 bash[28222]: audit 2026-04-16T19:27:02.672554+0000 mgr.vm01.nwhpas (mgr.14227) 362 : audit [DBG] from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:04.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:03 vm01 bash[28222]: audit 2026-04-16T19:27:02.672554+0000 mgr.vm01.nwhpas (mgr.14227) 362 : audit [DBG] from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:04.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:03 vm01 bash[28222]: audit 2026-04-16T19:27:03.137833+0000 mon.vm01 (mon.0) 1006 : audit [DBG] from='client.? 192.168.123.101:0/1449666945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:04.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:03 vm01 bash[28222]: audit 2026-04-16T19:27:03.137833+0000 mon.vm01 (mon.0) 1006 : audit [DBG] from='client.? 
192.168.123.101:0/1449666945' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:05.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:04 vm04 bash[34817]: audit 2026-04-16T19:27:02.898867+0000 mgr.vm01.nwhpas (mgr.14227) 363 : audit [DBG] from='client.15200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:05.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:04 vm04 bash[34817]: audit 2026-04-16T19:27:02.898867+0000 mgr.vm01.nwhpas (mgr.14227) 363 : audit [DBG] from='client.15200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:05.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:04 vm04 bash[34817]: cluster 2026-04-16T19:27:03.796237+0000 mgr.vm01.nwhpas (mgr.14227) 364 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:05.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:04 vm04 bash[34817]: cluster 2026-04-16T19:27:03.796237+0000 mgr.vm01.nwhpas (mgr.14227) 364 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:05.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:04 vm01 bash[28222]: audit 2026-04-16T19:27:02.898867+0000 mgr.vm01.nwhpas (mgr.14227) 363 : audit [DBG] from='client.15200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:05.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:04 vm01 bash[28222]: audit 2026-04-16T19:27:02.898867+0000 mgr.vm01.nwhpas (mgr.14227) 363 : audit [DBG] from='client.15200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:05.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:04 vm01 bash[28222]: cluster 2026-04-16T19:27:03.796237+0000 mgr.vm01.nwhpas (mgr.14227) 364 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:05.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:04 vm01 bash[28222]: cluster 2026-04-16T19:27:03.796237+0000 mgr.vm01.nwhpas (mgr.14227) 364 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:06.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:05 vm04 bash[34817]: cluster 2026-04-16T19:27:05.796612+0000 mgr.vm01.nwhpas (mgr.14227) 365 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:06.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:05 vm04 bash[34817]: cluster 2026-04-16T19:27:05.796612+0000 mgr.vm01.nwhpas (mgr.14227) 365 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:06.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:05 vm01 bash[28222]: cluster 2026-04-16T19:27:05.796612+0000 mgr.vm01.nwhpas (mgr.14227) 365 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 
2026-04-16T19:27:06.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:05 vm01 bash[28222]: cluster 2026-04-16T19:27:05.796612+0000 mgr.vm01.nwhpas (mgr.14227) 365 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:07.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:07 vm04 bash[34817]: audit 2026-04-16T19:27:07.566474+0000 mon.vm01 (mon.0) 1007 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:27:07.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:07 vm04 bash[34817]: audit 2026-04-16T19:27:07.566474+0000 mon.vm01 (mon.0) 1007 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:27:07.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:07 vm01 bash[28222]: audit 2026-04-16T19:27:07.566474+0000 mon.vm01 (mon.0) 1007 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:27:07.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:07 vm01 bash[28222]: audit 2026-04-16T19:27:07.566474+0000 mon.vm01 (mon.0) 1007 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:27:08.352 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:27:08.543 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:27:08.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 29s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:27:08.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 29s ago 4m - - 2026-04-16T19:27:08.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 29s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:27:08.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 29s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:27:08.806 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:27:08.806 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:27:08.806 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:27:08.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:08 vm04 bash[34817]: cluster 2026-04-16T19:27:07.796928+0000 mgr.vm01.nwhpas (mgr.14227) 366 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:08.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:08 vm04 bash[34817]: cluster 2026-04-16T19:27:07.796928+0000 mgr.vm01.nwhpas (mgr.14227) 366 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:08.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:08 vm01 bash[28222]: cluster 
2026-04-16T19:27:07.796928+0000 mgr.vm01.nwhpas (mgr.14227) 366 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:08.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:08 vm01 bash[28222]: cluster 2026-04-16T19:27:07.796928+0000 mgr.vm01.nwhpas (mgr.14227) 366 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:27:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:09 vm04 bash[34817]: audit 2026-04-16T19:27:08.331879+0000 mgr.vm01.nwhpas (mgr.14227) 367 : audit [DBG] from='client.15208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:09 vm04 bash[34817]: audit 2026-04-16T19:27:08.331879+0000 mgr.vm01.nwhpas (mgr.14227) 367 : audit [DBG] from='client.15208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:09 vm04 bash[34817]: audit 2026-04-16T19:27:08.537019+0000 mgr.vm01.nwhpas (mgr.14227) 368 : audit [DBG] from='client.15212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:09 vm04 bash[34817]: audit 2026-04-16T19:27:08.537019+0000 mgr.vm01.nwhpas (mgr.14227) 368 : audit [DBG] from='client.15212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:09 vm04 bash[34817]: audit 2026-04-16T19:27:08.802674+0000 mon.vm01 (mon.0) 1008 : audit [DBG] from='client.? 192.168.123.101:0/1265695741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:09.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:09 vm04 bash[34817]: audit 2026-04-16T19:27:08.802674+0000 mon.vm01 (mon.0) 1008 : audit [DBG] from='client.? 
192.168.123.101:0/1265695741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:09.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:09 vm01 bash[28222]: audit 2026-04-16T19:27:08.331879+0000 mgr.vm01.nwhpas (mgr.14227) 367 : audit [DBG] from='client.15208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:09 vm01 bash[28222]: audit 2026-04-16T19:27:08.331879+0000 mgr.vm01.nwhpas (mgr.14227) 367 : audit [DBG] from='client.15208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:09 vm01 bash[28222]: audit 2026-04-16T19:27:08.537019+0000 mgr.vm01.nwhpas (mgr.14227) 368 : audit [DBG] from='client.15212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:09 vm01 bash[28222]: audit 2026-04-16T19:27:08.537019+0000 mgr.vm01.nwhpas (mgr.14227) 368 : audit [DBG] from='client.15212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:09.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:09 vm01 bash[28222]: audit 2026-04-16T19:27:08.802674+0000 mon.vm01 (mon.0) 1008 : audit [DBG] from='client.? 192.168.123.101:0/1265695741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:09.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:09 vm01 bash[28222]: audit 2026-04-16T19:27:08.802674+0000 mon.vm01 (mon.0) 1008 : audit [DBG] from='client.? 
192.168.123.101:0/1265695741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:10 vm04 bash[34817]: cluster 2026-04-16T19:27:09.797350+0000 mgr.vm01.nwhpas (mgr.14227) 369 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:10.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:10 vm04 bash[34817]: cluster 2026-04-16T19:27:09.797350+0000 mgr.vm01.nwhpas (mgr.14227) 369 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:10.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:10 vm01 bash[28222]: cluster 2026-04-16T19:27:09.797350+0000 mgr.vm01.nwhpas (mgr.14227) 369 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:10.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:10 vm01 bash[28222]: cluster 2026-04-16T19:27:09.797350+0000 mgr.vm01.nwhpas (mgr.14227) 369 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:13.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:12 vm04 bash[34817]: cluster 2026-04-16T19:27:11.797822+0000 mgr.vm01.nwhpas (mgr.14227) 370 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:13.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:12 vm04 bash[34817]: cluster 2026-04-16T19:27:11.797822+0000 mgr.vm01.nwhpas (mgr.14227) 370 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:13.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:12 vm01 bash[28222]: cluster 2026-04-16T19:27:11.797822+0000 mgr.vm01.nwhpas (mgr.14227) 370 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:13.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:12 vm01 bash[28222]: cluster 2026-04-16T19:27:11.797822+0000 mgr.vm01.nwhpas (mgr.14227) 370 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:14.021 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:27:14.213 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:27:14.213 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 34s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:27:14.213 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 34s ago 4m - - 2026-04-16T19:27:14.213 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 35s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:27:14.213 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 35s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:27:14.449 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:27:14.449 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:27:14.449 INFO:teuthology.orchestra.run.vm01.stdout: daemon 
rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:27:15.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:14 vm04 bash[34817]: cluster 2026-04-16T19:27:13.798245+0000 mgr.vm01.nwhpas (mgr.14227) 371 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:15.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:14 vm04 bash[34817]: cluster 2026-04-16T19:27:13.798245+0000 mgr.vm01.nwhpas (mgr.14227) 371 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:15.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:14 vm04 bash[34817]: audit 2026-04-16T19:27:14.445176+0000 mon.vm01 (mon.0) 1009 : audit [DBG] from='client.? 192.168.123.101:0/1648378862' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:15.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:14 vm04 bash[34817]: audit 2026-04-16T19:27:14.445176+0000 mon.vm01 (mon.0) 1009 : audit [DBG] from='client.? 192.168.123.101:0/1648378862' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:15.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:14 vm01 bash[28222]: cluster 2026-04-16T19:27:13.798245+0000 mgr.vm01.nwhpas (mgr.14227) 371 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:15.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:14 vm01 bash[28222]: cluster 2026-04-16T19:27:13.798245+0000 mgr.vm01.nwhpas (mgr.14227) 371 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:15.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:14 vm01 bash[28222]: audit 2026-04-16T19:27:14.445176+0000 mon.vm01 (mon.0) 1009 : audit [DBG] from='client.? 192.168.123.101:0/1648378862' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:15.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:14 vm01 bash[28222]: audit 2026-04-16T19:27:14.445176+0000 mon.vm01 (mon.0) 1009 : audit [DBG] from='client.? 
192.168.123.101:0/1648378862' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:15 vm04 bash[34817]: audit 2026-04-16T19:27:13.998175+0000 mgr.vm01.nwhpas (mgr.14227) 372 : audit [DBG] from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:15 vm04 bash[34817]: audit 2026-04-16T19:27:13.998175+0000 mgr.vm01.nwhpas (mgr.14227) 372 : audit [DBG] from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:15 vm04 bash[34817]: audit 2026-04-16T19:27:14.206667+0000 mgr.vm01.nwhpas (mgr.14227) 373 : audit [DBG] from='client.15224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:15 vm04 bash[34817]: audit 2026-04-16T19:27:14.206667+0000 mgr.vm01.nwhpas (mgr.14227) 373 : audit [DBG] from='client.15224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:15 vm01 bash[28222]: audit 2026-04-16T19:27:13.998175+0000 mgr.vm01.nwhpas (mgr.14227) 372 : audit [DBG] from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:15 vm01 bash[28222]: audit 2026-04-16T19:27:13.998175+0000 mgr.vm01.nwhpas (mgr.14227) 372 : audit [DBG] from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:15 vm01 bash[28222]: audit 2026-04-16T19:27:14.206667+0000 mgr.vm01.nwhpas (mgr.14227) 373 : audit [DBG] from='client.15224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:16.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:15 vm01 bash[28222]: audit 2026-04-16T19:27:14.206667+0000 mgr.vm01.nwhpas (mgr.14227) 373 : audit [DBG] from='client.15224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:16 vm04 bash[34817]: cluster 2026-04-16T19:27:15.798760+0000 mgr.vm01.nwhpas (mgr.14227) 374 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:16 vm04 bash[34817]: cluster 2026-04-16T19:27:15.798760+0000 mgr.vm01.nwhpas (mgr.14227) 374 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:17.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:16 vm01 bash[28222]: cluster 2026-04-16T19:27:15.798760+0000 mgr.vm01.nwhpas (mgr.14227) 374 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:27:17.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:16 
vm01 bash[28222]: cluster 2026-04-16T19:27:15.798760+0000 mgr.vm01.nwhpas (mgr.14227) 374 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:17 vm04 bash[34817]: cluster 2026-04-16T19:27:17.799259+0000 mgr.vm01.nwhpas (mgr.14227) 375 : cluster [DBG] pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:18.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:17 vm01 bash[28222]: cluster 2026-04-16T19:27:17.799259+0000 mgr.vm01.nwhpas (mgr.14227) 375 : cluster [DBG] pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:19.679 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:19.877 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:19.878 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 40s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:19.878 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 40s ago 4m - -
2026-04-16T19:27:19.878 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 40s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:19.878 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 40s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:27:20.135 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:27:20.135 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:27:20.135 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:27:21.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:20 vm04 bash[34817]: audit 2026-04-16T19:27:19.657076+0000 mgr.vm01.nwhpas (mgr.14227) 376 : audit [DBG] from='client.15232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:21.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:20 vm04 bash[34817]: cluster 2026-04-16T19:27:19.799761+0000 mgr.vm01.nwhpas (mgr.14227) 377 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:21.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:20 vm04 bash[34817]: audit 2026-04-16T19:27:20.131804+0000 mon.vm01 (mon.0) 1010 : audit [DBG] from='client.? 192.168.123.101:0/1671421852' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:21.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:20 vm01 bash[28222]: audit 2026-04-16T19:27:19.657076+0000 mgr.vm01.nwhpas (mgr.14227) 376 : audit [DBG] from='client.15232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:21.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:20 vm01 bash[28222]: cluster 2026-04-16T19:27:19.799761+0000 mgr.vm01.nwhpas (mgr.14227) 377 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:21.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:20 vm01 bash[28222]: audit 2026-04-16T19:27:20.131804+0000 mon.vm01 (mon.0) 1010 : audit [DBG] from='client.? 192.168.123.101:0/1671421852' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:22.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:21 vm04 bash[34817]: audit 2026-04-16T19:27:19.870830+0000 mgr.vm01.nwhpas (mgr.14227) 378 : audit [DBG] from='client.15236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:22.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:21 vm04 bash[34817]: cluster 2026-04-16T19:27:21.800247+0000 mgr.vm01.nwhpas (mgr.14227) 379 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:22.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:21 vm01 bash[28222]: audit 2026-04-16T19:27:19.870830+0000 mgr.vm01.nwhpas (mgr.14227) 378 : audit [DBG] from='client.15236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:22.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:21 vm01 bash[28222]: cluster 2026-04-16T19:27:21.800247+0000 mgr.vm01.nwhpas (mgr.14227) 379 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:22 vm04 bash[34817]: audit 2026-04-16T19:27:22.566771+0000 mon.vm01 (mon.0) 1011 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:27:23.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:22 vm01 bash[28222]: audit 2026-04-16T19:27:22.566771+0000 mon.vm01 (mon.0) 1011 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
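The poll loop above is waiting for rgw.foo.vm01.qgurbb to report "stopped", but the daemon keeps coming back as "error", so every iteration re-prints the orch ps table and the CEPHADM_FAILED_DAEMON health detail. A minimal triage sketch for a cephadm daemon stuck in error state (daemon name taken from the log above; the ceph commands run against the cluster, the cephadm command on the host carrying the daemon, here vm01):

    # Last known state of the rgw daemons, machine-readable:
    ceph orch ps --daemon-type rgw --format json-pretty
    # Recent cephadm events often say why a deploy or stop failed:
    ceph log last cephadm
    # On vm01: read the failed daemon's own journal:
    cephadm logs --name rgw.foo.vm01.qgurbb
    # Outside a test that stopped the daemon on purpose, redeploy it:
    ceph orch daemon restart rgw.foo.vm01.qgurbb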
2026-04-16T19:27:24.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:23 vm04 bash[34817]: cluster 2026-04-16T19:27:23.800813+0000 mgr.vm01.nwhpas (mgr.14227) 380 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:24.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:23 vm01 bash[28222]: cluster 2026-04-16T19:27:23.800813+0000 mgr.vm01.nwhpas (mgr.14227) 380 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:25.343 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:25.540 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:25.540 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 46s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:25.540 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 46s ago 4m - -
2026-04-16T19:27:25.540 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 46s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:25.540 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 46s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:27:25.764 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:27:25.764 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:27:25.764 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:27:26.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:25 vm04 bash[34817]: audit 2026-04-16T19:27:25.760612+0000 mon.vm01 (mon.0) 1012 : audit [DBG] from='client.? 192.168.123.101:0/2123663537' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:26.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:25 vm01 bash[28222]: audit 2026-04-16T19:27:25.760612+0000 mon.vm01 (mon.0) 1012 : audit [DBG] from='client.? 192.168.123.101:0/2123663537' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:27.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:26 vm04 bash[34817]: audit 2026-04-16T19:27:25.321422+0000 mgr.vm01.nwhpas (mgr.14227) 381 : audit [DBG] from='client.15244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:27.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:26 vm04 bash[34817]: audit 2026-04-16T19:27:25.533671+0000 mgr.vm01.nwhpas (mgr.14227) 382 : audit [DBG] from='client.15248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:27.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:26 vm04 bash[34817]: cluster 2026-04-16T19:27:25.801275+0000 mgr.vm01.nwhpas (mgr.14227) 383 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:27:27.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:26 vm01 bash[28222]: audit 2026-04-16T19:27:25.321422+0000 mgr.vm01.nwhpas (mgr.14227) 381 : audit [DBG] from='client.15244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:27.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:26 vm01 bash[28222]: audit 2026-04-16T19:27:25.533671+0000 mgr.vm01.nwhpas (mgr.14227) 382 : audit [DBG] from='client.15248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:27.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:26 vm01 bash[28222]: cluster 2026-04-16T19:27:25.801275+0000 mgr.vm01.nwhpas (mgr.14227) 383 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:27:28.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:27 vm04 bash[34817]: cluster 2026-04-16T19:27:27.801676+0000 mgr.vm01.nwhpas (mgr.14227) 384 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:28.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:27 vm01 bash[28222]: cluster 2026-04-16T19:27:27.801676+0000 mgr.vm01.nwhpas (mgr.14227) 384 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:30.969 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:31.159 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:31.160 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 51s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:31.160 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 51s ago 4m - -
2026-04-16T19:27:31.160 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 52s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:31.160 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 52s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:27:31.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:30 vm04 bash[34817]: cluster 2026-04-16T19:27:29.802234+0000 mgr.vm01.nwhpas (mgr.14227) 385 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:31.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:30 vm01 bash[28222]: cluster 2026-04-16T19:27:29.802234+0000 mgr.vm01.nwhpas (mgr.14227) 385 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
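Each poll iteration also accounts for the audit noise around it: the `ceph orch ps` call is dispatched to the mgr (one audit entry per call, e.g. 381/382 above), the `ceph health detail` call to the mon (the mon.0 entries), and both mon journals then echo every cluster-log record, which is why the same entry appears under both journalctl@ceph.mon.vm01 and journalctl@ceph.mon.vm04. The same records can be replayed from the cluster log buffer; a small sketch using the count/level/channel arguments `ceph log last` accepts:

    # Most recent audit-channel records (the entries echoed above):
    ceph log last 20 debug audit
    # The cluster channel carries the pgmap digests:
    ceph log last 20 debug cluster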
2026-04-16T19:27:31.396 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:27:31.396 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:27:31.396 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:27:32.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:31 vm04 bash[34817]: audit 2026-04-16T19:27:31.392940+0000 mon.vm01 (mon.0) 1013 : audit [DBG] from='client.? 192.168.123.101:0/1318110758' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:32.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:31 vm01 bash[28222]: audit 2026-04-16T19:27:31.392940+0000 mon.vm01 (mon.0) 1013 : audit [DBG] from='client.? 192.168.123.101:0/1318110758' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:33.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:32 vm04 bash[34817]: audit 2026-04-16T19:27:30.947851+0000 mgr.vm01.nwhpas (mgr.14227) 386 : audit [DBG] from='client.15256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:33.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:32 vm04 bash[34817]: audit 2026-04-16T19:27:31.153046+0000 mgr.vm01.nwhpas (mgr.14227) 387 : audit [DBG] from='client.15260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:33.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:32 vm04 bash[34817]: cluster 2026-04-16T19:27:31.802651+0000 mgr.vm01.nwhpas (mgr.14227) 388 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:33.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:32 vm01 bash[28222]: audit 2026-04-16T19:27:30.947851+0000 mgr.vm01.nwhpas (mgr.14227) 386 : audit [DBG] from='client.15256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:33.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:32 vm01 bash[28222]: audit 2026-04-16T19:27:31.153046+0000 mgr.vm01.nwhpas (mgr.14227) 387 : audit [DBG] from='client.15260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:33.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:32 vm01 bash[28222]: cluster 2026-04-16T19:27:31.802651+0000 mgr.vm01.nwhpas (mgr.14227) 388 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:34.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:33 vm04 bash[34817]: cluster 2026-04-16T19:27:33.803173+0000 mgr.vm01.nwhpas (mgr.14227) 389 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:34.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:33 vm01 bash[28222]: cluster 2026-04-16T19:27:33.803173+0000 mgr.vm01.nwhpas (mgr.14227) 389 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:36.614 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:36.796 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:36.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 57s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:36.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 57s ago 4m - -
2026-04-16T19:27:36.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 57s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:36.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 57s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:27:37.031 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:27:37.031 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:27:37.031 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:27:37.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:36 vm04 bash[34817]: cluster 2026-04-16T19:27:35.803678+0000 mgr.vm01.nwhpas (mgr.14227) 390 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:37.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:36 vm01 bash[28222]: cluster 2026-04-16T19:27:35.803678+0000 mgr.vm01.nwhpas (mgr.14227) 390 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
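The recurring pgmap lines are the mgr's periodic PG digest: all 129 PGs stay active+clean with 587 KiB of data and 216 MiB used of 160 GiB, so the storage side remains healthy throughout; only the one rgw daemon is failing. The same digest can be pulled on demand; a minimal sketch:

    # One-shot PG summary (same numbers as the pgmap lines above):
    ceph pg stat
    # Fuller view including the health warning and per-pool usage:
    ceph status
    ceph df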
2026-04-16T19:27:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:37 vm04 bash[34817]: audit 2026-04-16T19:27:36.593723+0000 mgr.vm01.nwhpas (mgr.14227) 391 : audit [DBG] from='client.15268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:37 vm04 bash[34817]: audit 2026-04-16T19:27:36.789533+0000 mgr.vm01.nwhpas (mgr.14227) 392 : audit [DBG] from='client.15272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:37 vm04 bash[34817]: audit 2026-04-16T19:27:37.027071+0000 mon.vm01 (mon.0) 1014 : audit [DBG] from='client.? 192.168.123.101:0/3247167750' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:37 vm04 bash[34817]: audit 2026-04-16T19:27:37.566922+0000 mon.vm01 (mon.0) 1015 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:27:38.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:37 vm01 bash[28222]: audit 2026-04-16T19:27:36.593723+0000 mgr.vm01.nwhpas (mgr.14227) 391 : audit [DBG] from='client.15268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:38.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:37 vm01 bash[28222]: audit 2026-04-16T19:27:36.789533+0000 mgr.vm01.nwhpas (mgr.14227) 392 : audit [DBG] from='client.15272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:38.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:37 vm01 bash[28222]: audit 2026-04-16T19:27:37.027071+0000 mon.vm01 (mon.0) 1014 : audit [DBG] from='client.? 192.168.123.101:0/3247167750' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:38.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:37 vm01 bash[28222]: audit 2026-04-16T19:27:37.566922+0000 mon.vm01 (mon.0) 1015 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:27:39.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:38 vm04 bash[34817]: cluster 2026-04-16T19:27:37.804028+0000 mgr.vm01.nwhpas (mgr.14227) 393 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:39.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:38 vm01 bash[28222]: cluster 2026-04-16T19:27:37.804028+0000 mgr.vm01.nwhpas (mgr.14227) 393 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:27:40.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:39 vm04 bash[34817]: cluster 2026-04-16T19:27:39.804454+0000 mgr.vm01.nwhpas (mgr.14227) 394 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:27:40.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:39 vm04 bash[34817]: audit 2026-04-16T19:27:39.811850+0000 mon.vm01 (mon.0) 1016 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:27:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:39 vm01 bash[28222]: cluster 2026-04-16T19:27:39.804454+0000 mgr.vm01.nwhpas (mgr.14227) 394 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:27:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:39 vm01 bash[28222]: audit 2026-04-16T19:27:39.811850+0000 mon.vm01 (mon.0) 1016 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:27:42.233 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:42.456 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:42.456 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 63s ago 4m 105M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:42.456 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 63s ago 4m - -
2026-04-16T19:27:42.456 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 63s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:42.456 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 63s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:27:42.712 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:27:42.712 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:27:42.712 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:27:43.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:42 vm04 bash[34817]: cluster 2026-04-16T19:27:41.804827+0000 mgr.vm01.nwhpas (mgr.14227) 395 : cluster [DBG] pgmap v208: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:27:43.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:42 vm04 bash[34817]: audit 2026-04-16T19:27:42.708299+0000 mon.vm01 (mon.0) 1017 : audit [DBG] from='client.? 192.168.123.101:0/1998331199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
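Interleaved with the test's own polling, the audit channel also records the mgr's housekeeping queries, here `osd blocklist ls` (entries 1011, 1015) and `config dump` (entry 1016); these originate from mgr.vm01.nwhpas itself, not from the test. Both can be issued by hand; a small sketch:

    # The same queries the mgr issues on its refresh cycle, run manually:
    ceph osd blocklist ls
    ceph config dump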
2026-04-16T19:27:43.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:42 vm01 bash[28222]: cluster 2026-04-16T19:27:41.804827+0000 mgr.vm01.nwhpas (mgr.14227) 395 : cluster [DBG] pgmap v208: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:27:43.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:42 vm01 bash[28222]: audit 2026-04-16T19:27:42.708299+0000 mon.vm01 (mon.0) 1017 : audit [DBG] from='client.? 192.168.123.101:0/1998331199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:44.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:43 vm04 bash[34817]: audit 2026-04-16T19:27:42.212373+0000 mgr.vm01.nwhpas (mgr.14227) 396 : audit [DBG] from='client.15280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:44.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:43 vm04 bash[34817]: audit 2026-04-16T19:27:42.450014+0000 mgr.vm01.nwhpas (mgr.14227) 397 : audit [DBG] from='client.24817 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:44.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:43 vm01 bash[28222]: audit 2026-04-16T19:27:42.212373+0000 mgr.vm01.nwhpas (mgr.14227) 396 : audit [DBG] from='client.15280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:44.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:43 vm01 bash[28222]: audit 2026-04-16T19:27:42.450014+0000 mgr.vm01.nwhpas (mgr.14227) 397 : audit [DBG] from='client.24817 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:45.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:44 vm04 bash[34817]: cluster 2026-04-16T19:27:43.805309+0000 mgr.vm01.nwhpas (mgr.14227) 398 : cluster [DBG] pgmap v209: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:45.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:44 vm01 bash[28222]: cluster 2026-04-16T19:27:43.805309+0000 mgr.vm01.nwhpas (mgr.14227) 398 : cluster [DBG] pgmap v209: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: audit 2026-04-16T19:27:45.199048+0000 mon.vm01 (mon.0) 1018 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: audit 2026-04-16T19:27:45.204561+0000 mon.vm01 (mon.0) 1019 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: audit 2026-04-16T19:27:45.558495+0000 mon.vm01 (mon.0) 1020 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: audit 2026-04-16T19:27:45.559161+0000 mon.vm01 (mon.0) 1021 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: cluster 2026-04-16T19:27:45.560335+0000 mgr.vm01.nwhpas (mgr.14227) 399 : cluster [DBG] pgmap v210: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: audit 2026-04-16T19:27:45.564278+0000 mon.vm01 (mon.0) 1022 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:27:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:46 vm04 bash[34817]: audit 2026-04-16T19:27:45.566091+0000 mon.vm01 (mon.0) 1023 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: audit 2026-04-16T19:27:45.199048+0000 mon.vm01 (mon.0) 1018 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: audit 2026-04-16T19:27:45.204561+0000 mon.vm01 (mon.0) 1019 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: audit 2026-04-16T19:27:45.558495+0000 mon.vm01 (mon.0) 1020 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: audit 2026-04-16T19:27:45.559161+0000 mon.vm01 (mon.0) 1021 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: cluster 2026-04-16T19:27:45.560335+0000 mgr.vm01.nwhpas (mgr.14227) 399 : cluster [DBG] pgmap v210: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: audit 2026-04-16T19:27:45.564278+0000 mon.vm01 (mon.0) 1022 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:27:46.461 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:46 vm01 bash[28222]: audit 2026-04-16T19:27:45.566091+0000 mon.vm01 (mon.0) 1023 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:27:47.928 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:48.109 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:48.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 2s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:48.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 2s ago 4m - -
2026-04-16T19:27:48.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 69s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:48.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 69s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:27:48.340 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:27:48.341 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:27:48.341 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:27:48.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:48 vm01 bash[28222]: cluster 2026-04-16T19:27:47.560802+0000 mgr.vm01.nwhpas (mgr.14227) 400 : cluster [DBG] pgmap v211: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
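Note the REFRESHED column in the 19:27:48 table: it drops to "2s ago" for the vm01 daemons because cephadm has just re-inventoried that host (the config generate-minimal-conf and auth get audit entries at 19:27:45 are likely part of that same refresh), yet the daemon still reports "error", so the failure is current rather than stale cached state. When waiting on the periodic cycle is not acceptable, the refresh can be forced; a sketch:

    # Force cephadm to re-query daemon state instead of serving cached data:
    ceph orch ps --daemon-type rgw --refresh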
2026-04-16T19:27:48.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:48 vm01 bash[28222]: audit 2026-04-16T19:27:48.336778+0000 mon.vm01 (mon.0) 1024 : audit [DBG] from='client.? 192.168.123.101:0/2678920746' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:49.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:48 vm04 bash[34817]: cluster 2026-04-16T19:27:47.560802+0000 mgr.vm01.nwhpas (mgr.14227) 400 : cluster [DBG] pgmap v211: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:49.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:48 vm04 bash[34817]: audit 2026-04-16T19:27:48.336778+0000 mon.vm01 (mon.0) 1024 : audit [DBG] from='client.? 192.168.123.101:0/2678920746' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:27:50.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:49 vm04 bash[34817]: audit 2026-04-16T19:27:47.907311+0000 mgr.vm01.nwhpas (mgr.14227) 401 : audit [DBG] from='client.15292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:50.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:49 vm04 bash[34817]: audit 2026-04-16T19:27:48.102986+0000 mgr.vm01.nwhpas (mgr.14227) 402 : audit [DBG] from='client.15296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:50.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:49 vm01 bash[28222]: audit 2026-04-16T19:27:47.907311+0000 mgr.vm01.nwhpas (mgr.14227) 401 : audit [DBG] from='client.15292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:50.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:49 vm01 bash[28222]: audit 2026-04-16T19:27:48.102986+0000 mgr.vm01.nwhpas (mgr.14227) 402 : audit [DBG] from='client.15296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:27:51.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:50 vm04 bash[34817]: cluster 2026-04-16T19:27:49.561264+0000 mgr.vm01.nwhpas (mgr.14227) 403 : cluster [DBG] pgmap v212: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:51.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:50 vm01 bash[28222]: cluster 2026-04-16T19:27:49.561264+0000 mgr.vm01.nwhpas (mgr.14227) 403 : cluster [DBG] pgmap v212: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:53.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:52 vm04 bash[34817]: cluster 2026-04-16T19:27:51.561731+0000 mgr.vm01.nwhpas (mgr.14227) 404 : cluster [DBG] pgmap v213: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:53.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:52 vm04 bash[34817]: audit 2026-04-16T19:27:52.567342+0000 mon.vm01 (mon.0) 1025 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:27:53.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:52 vm01 bash[28222]: cluster 2026-04-16T19:27:51.561731+0000 mgr.vm01.nwhpas (mgr.14227) 404 : cluster [DBG] pgmap v213: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s
2026-04-16T19:27:53.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:52 vm01 bash[28222]: audit 2026-04-16T19:27:52.567342+0000 mon.vm01 (mon.0) 1025 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:27:53.554 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:27:53.745 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:27:53.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (3m) 8s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:27:53.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 8s ago 4m - -
2026-04-16T19:27:53.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 74s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:27:53.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 74s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:27:53.982 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:27:53.982 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:27:53.982 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: audit 2026-04-16T19:27:53.532211+0000 mgr.vm01.nwhpas (mgr.14227) 405 : audit [DBG] from='client.15304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: audit 2026-04-16T19:27:53.532211+0000 mgr.vm01.nwhpas (mgr.14227) 405 : audit [DBG] from='client.15304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: cluster 2026-04-16T19:27:53.562177+0000 mgr.vm01.nwhpas (mgr.14227) 406 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: cluster 2026-04-16T19:27:53.562177+0000 mgr.vm01.nwhpas (mgr.14227) 406 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: audit 2026-04-16T19:27:53.738564+0000 mgr.vm01.nwhpas (mgr.14227) 407 : audit [DBG] from='client.15308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: audit 2026-04-16T19:27:53.738564+0000 mgr.vm01.nwhpas (mgr.14227) 407 : audit [DBG] from='client.15308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: audit 2026-04-16T19:27:53.978545+0000 mon.vm01 (mon.0) 1026 : audit [DBG] from='client.? 192.168.123.101:0/1770713490' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:55.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:54 vm04 bash[34817]: audit 2026-04-16T19:27:53.978545+0000 mon.vm01 (mon.0) 1026 : audit [DBG] from='client.? 
192.168.123.101:0/1770713490' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: audit 2026-04-16T19:27:53.532211+0000 mgr.vm01.nwhpas (mgr.14227) 405 : audit [DBG] from='client.15304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: audit 2026-04-16T19:27:53.532211+0000 mgr.vm01.nwhpas (mgr.14227) 405 : audit [DBG] from='client.15304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: cluster 2026-04-16T19:27:53.562177+0000 mgr.vm01.nwhpas (mgr.14227) 406 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: cluster 2026-04-16T19:27:53.562177+0000 mgr.vm01.nwhpas (mgr.14227) 406 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 174 B/s rd, 348 B/s wr, 0 op/s 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: audit 2026-04-16T19:27:53.738564+0000 mgr.vm01.nwhpas (mgr.14227) 407 : audit [DBG] from='client.15308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: audit 2026-04-16T19:27:53.738564+0000 mgr.vm01.nwhpas (mgr.14227) 407 : audit [DBG] from='client.15308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: audit 2026-04-16T19:27:53.978545+0000 mon.vm01 (mon.0) 1026 : audit [DBG] from='client.? 192.168.123.101:0/1770713490' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:55.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:54 vm01 bash[28222]: audit 2026-04-16T19:27:53.978545+0000 mon.vm01 (mon.0) 1026 : audit [DBG] from='client.? 
192.168.123.101:0/1770713490' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:27:56.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:56 vm01 bash[28222]: cluster 2026-04-16T19:27:55.562642+0000 mgr.vm01.nwhpas (mgr.14227) 408 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:56 vm01 bash[28222]: cluster 2026-04-16T19:27:55.562642+0000 mgr.vm01.nwhpas (mgr.14227) 408 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:57.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:56 vm04 bash[34817]: cluster 2026-04-16T19:27:55.562642+0000 mgr.vm01.nwhpas (mgr.14227) 408 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:57.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:56 vm04 bash[34817]: cluster 2026-04-16T19:27:55.562642+0000 mgr.vm01.nwhpas (mgr.14227) 408 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:59.196 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:27:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:58 vm04 bash[34817]: cluster 2026-04-16T19:27:57.563083+0000 mgr.vm01.nwhpas (mgr.14227) 409 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:59.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:58 vm04 bash[34817]: cluster 2026-04-16T19:27:57.563083+0000 mgr.vm01.nwhpas (mgr.14227) 409 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:58 vm01 bash[28222]: cluster 2026-04-16T19:27:57.563083+0000 mgr.vm01.nwhpas (mgr.14227) 409 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:59.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:58 vm01 bash[28222]: cluster 2026-04-16T19:27:57.563083+0000 mgr.vm01.nwhpas (mgr.14227) 409 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:27:59.378 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:27:59.378 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 14s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:27:59.378 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 14s ago 4m - - 2026-04-16T19:27:59.378 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 80s ago 4m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:27:59.378 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (4m) 80s ago 4m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:27:59.608 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:27:59.608 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:27:59.608 INFO:teuthology.orchestra.run.vm01.stdout: daemon 
rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:28:00.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:59 vm04 bash[34817]: audit 2026-04-16T19:27:59.604222+0000 mon.vm01 (mon.0) 1027 : audit [DBG] from='client.? 192.168.123.101:0/3102727586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:00.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:27:59 vm04 bash[34817]: audit 2026-04-16T19:27:59.604222+0000 mon.vm01 (mon.0) 1027 : audit [DBG] from='client.? 192.168.123.101:0/3102727586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:00.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:59 vm01 bash[28222]: audit 2026-04-16T19:27:59.604222+0000 mon.vm01 (mon.0) 1027 : audit [DBG] from='client.? 192.168.123.101:0/3102727586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:00.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:27:59 vm01 bash[28222]: audit 2026-04-16T19:27:59.604222+0000 mon.vm01 (mon.0) 1027 : audit [DBG] from='client.? 192.168.123.101:0/3102727586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:01.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:00 vm04 bash[34817]: audit 2026-04-16T19:27:59.174380+0000 mgr.vm01.nwhpas (mgr.14227) 410 : audit [DBG] from='client.15316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:00 vm04 bash[34817]: audit 2026-04-16T19:27:59.174380+0000 mgr.vm01.nwhpas (mgr.14227) 410 : audit [DBG] from='client.15316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:00 vm04 bash[34817]: audit 2026-04-16T19:27:59.371777+0000 mgr.vm01.nwhpas (mgr.14227) 411 : audit [DBG] from='client.15320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:00 vm04 bash[34817]: audit 2026-04-16T19:27:59.371777+0000 mgr.vm01.nwhpas (mgr.14227) 411 : audit [DBG] from='client.15320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:00 vm04 bash[34817]: cluster 2026-04-16T19:27:59.563592+0000 mgr.vm01.nwhpas (mgr.14227) 412 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:01.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:00 vm04 bash[34817]: cluster 2026-04-16T19:27:59.563592+0000 mgr.vm01.nwhpas (mgr.14227) 412 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:00 vm01 bash[28222]: audit 2026-04-16T19:27:59.174380+0000 mgr.vm01.nwhpas (mgr.14227) 410 : audit [DBG] from='client.15316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:00 vm01 bash[28222]: audit 2026-04-16T19:27:59.174380+0000 mgr.vm01.nwhpas (mgr.14227) 410 : audit [DBG] 
from='client.15316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:00 vm01 bash[28222]: audit 2026-04-16T19:27:59.371777+0000 mgr.vm01.nwhpas (mgr.14227) 411 : audit [DBG] from='client.15320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:00 vm01 bash[28222]: audit 2026-04-16T19:27:59.371777+0000 mgr.vm01.nwhpas (mgr.14227) 411 : audit [DBG] from='client.15320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:00 vm01 bash[28222]: cluster 2026-04-16T19:27:59.563592+0000 mgr.vm01.nwhpas (mgr.14227) 412 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:01.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:00 vm01 bash[28222]: cluster 2026-04-16T19:27:59.563592+0000 mgr.vm01.nwhpas (mgr.14227) 412 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:03.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:02 vm04 bash[34817]: cluster 2026-04-16T19:28:01.564038+0000 mgr.vm01.nwhpas (mgr.14227) 413 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:03.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:02 vm04 bash[34817]: cluster 2026-04-16T19:28:01.564038+0000 mgr.vm01.nwhpas (mgr.14227) 413 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:03.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:02 vm01 bash[28222]: cluster 2026-04-16T19:28:01.564038+0000 mgr.vm01.nwhpas (mgr.14227) 413 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:03.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:02 vm01 bash[28222]: cluster 2026-04-16T19:28:01.564038+0000 mgr.vm01.nwhpas (mgr.14227) 413 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:04.816 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:28:04.996 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:28:04.996 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 19s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:28:04.996 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 19s ago 4m - - 2026-04-16T19:28:04.996 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 86s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:28:04.996 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 86s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:28:05.208 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:04 vm04 bash[34817]: cluster 2026-04-16T19:28:03.564392+0000 mgr.vm01.nwhpas (mgr.14227) 414 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:05.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:04 vm04 bash[34817]: cluster 2026-04-16T19:28:03.564392+0000 mgr.vm01.nwhpas (mgr.14227) 414 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:05.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:04 vm01 bash[28222]: cluster 2026-04-16T19:28:03.564392+0000 mgr.vm01.nwhpas (mgr.14227) 414 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:05.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:04 vm01 bash[28222]: cluster 2026-04-16T19:28:03.564392+0000 mgr.vm01.nwhpas (mgr.14227) 414 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:05.231 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:28:05.232 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:28:05.232 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:28:06.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:05 vm04 bash[34817]: audit 2026-04-16T19:28:04.796625+0000 mgr.vm01.nwhpas (mgr.14227) 415 : audit [DBG] from='client.15328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:06.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:05 vm04 bash[34817]: audit 2026-04-16T19:28:04.796625+0000 mgr.vm01.nwhpas (mgr.14227) 415 : audit [DBG] from='client.15328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:06.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:05 vm04 bash[34817]: audit 2026-04-16T19:28:05.227850+0000 mon.vm01 (mon.0) 1028 : audit [DBG] from='client.? 192.168.123.101:0/1164217799' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:06.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:05 vm04 bash[34817]: audit 2026-04-16T19:28:05.227850+0000 mon.vm01 (mon.0) 1028 : audit [DBG] from='client.? 192.168.123.101:0/1164217799' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:06.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:05 vm01 bash[28222]: audit 2026-04-16T19:28:04.796625+0000 mgr.vm01.nwhpas (mgr.14227) 415 : audit [DBG] from='client.15328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:06.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:05 vm01 bash[28222]: audit 2026-04-16T19:28:04.796625+0000 mgr.vm01.nwhpas (mgr.14227) 415 : audit [DBG] from='client.15328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:06.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:05 vm01 bash[28222]: audit 2026-04-16T19:28:05.227850+0000 mon.vm01 (mon.0) 1028 : audit [DBG] from='client.? 
192.168.123.101:0/1164217799' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:06.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:05 vm01 bash[28222]: audit 2026-04-16T19:28:05.227850+0000 mon.vm01 (mon.0) 1028 : audit [DBG] from='client.? 192.168.123.101:0/1164217799' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:06 vm01 bash[28222]: audit 2026-04-16T19:28:04.989472+0000 mgr.vm01.nwhpas (mgr.14227) 416 : audit [DBG] from='client.15332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:06 vm01 bash[28222]: audit 2026-04-16T19:28:04.989472+0000 mgr.vm01.nwhpas (mgr.14227) 416 : audit [DBG] from='client.15332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:06 vm01 bash[28222]: cluster 2026-04-16T19:28:05.564849+0000 mgr.vm01.nwhpas (mgr.14227) 417 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:06 vm01 bash[28222]: cluster 2026-04-16T19:28:05.564849+0000 mgr.vm01.nwhpas (mgr.14227) 417 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:07.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:06 vm04 bash[34817]: audit 2026-04-16T19:28:04.989472+0000 mgr.vm01.nwhpas (mgr.14227) 416 : audit [DBG] from='client.15332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:07.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:06 vm04 bash[34817]: audit 2026-04-16T19:28:04.989472+0000 mgr.vm01.nwhpas (mgr.14227) 416 : audit [DBG] from='client.15332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:07.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:06 vm04 bash[34817]: cluster 2026-04-16T19:28:05.564849+0000 mgr.vm01.nwhpas (mgr.14227) 417 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:07.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:06 vm04 bash[34817]: cluster 2026-04-16T19:28:05.564849+0000 mgr.vm01.nwhpas (mgr.14227) 417 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:08.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:07 vm04 bash[34817]: audit 2026-04-16T19:28:07.567204+0000 mon.vm01 (mon.0) 1029 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:08.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:07 vm04 bash[34817]: audit 2026-04-16T19:28:07.567204+0000 mon.vm01 (mon.0) 1029 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:08.211 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:07 vm01 bash[28222]: audit 2026-04-16T19:28:07.567204+0000 mon.vm01 (mon.0) 1029 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:08.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:07 vm01 bash[28222]: audit 2026-04-16T19:28:07.567204+0000 mon.vm01 (mon.0) 1029 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:09.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:08 vm04 bash[34817]: cluster 2026-04-16T19:28:07.565253+0000 mgr.vm01.nwhpas (mgr.14227) 418 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:09.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:08 vm04 bash[34817]: cluster 2026-04-16T19:28:07.565253+0000 mgr.vm01.nwhpas (mgr.14227) 418 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:09.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:08 vm01 bash[28222]: cluster 2026-04-16T19:28:07.565253+0000 mgr.vm01.nwhpas (mgr.14227) 418 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:09.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:08 vm01 bash[28222]: cluster 2026-04-16T19:28:07.565253+0000 mgr.vm01.nwhpas (mgr.14227) 418 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:10.442 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:28:10.627 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:28:10.627 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 25s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:28:10.627 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 25s ago 5m - - 2026-04-16T19:28:10.627 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 91s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:28:10.627 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 91s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:28:10.860 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:28:10.860 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:28:10.860 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:28:11.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:10 vm04 bash[34817]: cluster 2026-04-16T19:28:09.565665+0000 mgr.vm01.nwhpas (mgr.14227) 419 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:11.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:10 vm04 bash[34817]: cluster 2026-04-16T19:28:09.565665+0000 mgr.vm01.nwhpas 
(mgr.14227) 419 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:11.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:10 vm01 bash[28222]: cluster 2026-04-16T19:28:09.565665+0000 mgr.vm01.nwhpas (mgr.14227) 419 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:11.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:10 vm01 bash[28222]: cluster 2026-04-16T19:28:09.565665+0000 mgr.vm01.nwhpas (mgr.14227) 419 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:28:12.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:11 vm04 bash[34817]: audit 2026-04-16T19:28:10.418496+0000 mgr.vm01.nwhpas (mgr.14227) 420 : audit [DBG] from='client.15340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:11 vm04 bash[34817]: audit 2026-04-16T19:28:10.418496+0000 mgr.vm01.nwhpas (mgr.14227) 420 : audit [DBG] from='client.15340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:11 vm04 bash[34817]: audit 2026-04-16T19:28:10.620673+0000 mgr.vm01.nwhpas (mgr.14227) 421 : audit [DBG] from='client.15344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:11 vm04 bash[34817]: audit 2026-04-16T19:28:10.620673+0000 mgr.vm01.nwhpas (mgr.14227) 421 : audit [DBG] from='client.15344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:11 vm04 bash[34817]: audit 2026-04-16T19:28:10.856071+0000 mon.vm01 (mon.0) 1030 : audit [DBG] from='client.? 192.168.123.101:0/855437481' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:12.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:11 vm04 bash[34817]: audit 2026-04-16T19:28:10.856071+0000 mon.vm01 (mon.0) 1030 : audit [DBG] from='client.? 
192.168.123.101:0/855437481' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:12.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:11 vm01 bash[28222]: audit 2026-04-16T19:28:10.418496+0000 mgr.vm01.nwhpas (mgr.14227) 420 : audit [DBG] from='client.15340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:11 vm01 bash[28222]: audit 2026-04-16T19:28:10.418496+0000 mgr.vm01.nwhpas (mgr.14227) 420 : audit [DBG] from='client.15340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:11 vm01 bash[28222]: audit 2026-04-16T19:28:10.620673+0000 mgr.vm01.nwhpas (mgr.14227) 421 : audit [DBG] from='client.15344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:11 vm01 bash[28222]: audit 2026-04-16T19:28:10.620673+0000 mgr.vm01.nwhpas (mgr.14227) 421 : audit [DBG] from='client.15344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:12.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:11 vm01 bash[28222]: audit 2026-04-16T19:28:10.856071+0000 mon.vm01 (mon.0) 1030 : audit [DBG] from='client.? 192.168.123.101:0/855437481' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:12.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:11 vm01 bash[28222]: audit 2026-04-16T19:28:10.856071+0000 mon.vm01 (mon.0) 1030 : audit [DBG] from='client.? 
192.168.123.101:0/855437481' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:13.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:12 vm04 bash[34817]: cluster 2026-04-16T19:28:11.566113+0000 mgr.vm01.nwhpas (mgr.14227) 422 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:13.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:12 vm04 bash[34817]: cluster 2026-04-16T19:28:11.566113+0000 mgr.vm01.nwhpas (mgr.14227) 422 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:13.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:12 vm01 bash[28222]: cluster 2026-04-16T19:28:11.566113+0000 mgr.vm01.nwhpas (mgr.14227) 422 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:13.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:12 vm01 bash[28222]: cluster 2026-04-16T19:28:11.566113+0000 mgr.vm01.nwhpas (mgr.14227) 422 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:15.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:14 vm04 bash[34817]: cluster 2026-04-16T19:28:13.566490+0000 mgr.vm01.nwhpas (mgr.14227) 423 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:15.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:14 vm04 bash[34817]: cluster 2026-04-16T19:28:13.566490+0000 mgr.vm01.nwhpas (mgr.14227) 423 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:15.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:14 vm01 bash[28222]: cluster 2026-04-16T19:28:13.566490+0000 mgr.vm01.nwhpas (mgr.14227) 423 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:14 vm01 bash[28222]: cluster 2026-04-16T19:28:13.566490+0000 mgr.vm01.nwhpas (mgr.14227) 423 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:16.056 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:28:16.246 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:28:16.246 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 31s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:28:16.246 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 31s ago 5m - - 2026-04-16T19:28:16.246 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 97s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:28:16.246 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 97s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:28:16.476 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:28:16.476 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:28:16.476 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:28:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:16 vm04 bash[34817]: cluster 2026-04-16T19:28:15.566906+0000 mgr.vm01.nwhpas (mgr.14227) 424 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:16 vm04 bash[34817]: cluster 2026-04-16T19:28:15.566906+0000 mgr.vm01.nwhpas (mgr.14227) 424 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:16 vm04 bash[34817]: audit 2026-04-16T19:28:16.472742+0000 mon.vm01 (mon.0) 1031 : audit [DBG] from='client.? 192.168.123.101:0/3683968148' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:17.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:16 vm04 bash[34817]: audit 2026-04-16T19:28:16.472742+0000 mon.vm01 (mon.0) 1031 : audit [DBG] from='client.? 192.168.123.101:0/3683968148' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:17.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:16 vm01 bash[28222]: cluster 2026-04-16T19:28:15.566906+0000 mgr.vm01.nwhpas (mgr.14227) 424 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:17.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:16 vm01 bash[28222]: cluster 2026-04-16T19:28:15.566906+0000 mgr.vm01.nwhpas (mgr.14227) 424 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:17.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:16 vm01 bash[28222]: audit 2026-04-16T19:28:16.472742+0000 mon.vm01 (mon.0) 1031 : audit [DBG] from='client.? 192.168.123.101:0/3683968148' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:17.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:16 vm01 bash[28222]: audit 2026-04-16T19:28:16.472742+0000 mon.vm01 (mon.0) 1031 : audit [DBG] from='client.? 
192.168.123.101:0/3683968148' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:17 vm04 bash[34817]: audit 2026-04-16T19:28:16.035641+0000 mgr.vm01.nwhpas (mgr.14227) 425 : audit [DBG] from='client.15352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:17 vm04 bash[34817]: audit 2026-04-16T19:28:16.035641+0000 mgr.vm01.nwhpas (mgr.14227) 425 : audit [DBG] from='client.15352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:17 vm04 bash[34817]: audit 2026-04-16T19:28:16.239664+0000 mgr.vm01.nwhpas (mgr.14227) 426 : audit [DBG] from='client.15356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:17 vm04 bash[34817]: audit 2026-04-16T19:28:16.239664+0000 mgr.vm01.nwhpas (mgr.14227) 426 : audit [DBG] from='client.15356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:17 vm01 bash[28222]: audit 2026-04-16T19:28:16.035641+0000 mgr.vm01.nwhpas (mgr.14227) 425 : audit [DBG] from='client.15352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:17 vm01 bash[28222]: audit 2026-04-16T19:28:16.035641+0000 mgr.vm01.nwhpas (mgr.14227) 425 : audit [DBG] from='client.15352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:17 vm01 bash[28222]: audit 2026-04-16T19:28:16.239664+0000 mgr.vm01.nwhpas (mgr.14227) 426 : audit [DBG] from='client.15356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:18.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:17 vm01 bash[28222]: audit 2026-04-16T19:28:16.239664+0000 mgr.vm01.nwhpas (mgr.14227) 426 : audit [DBG] from='client.15356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:19.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:18 vm04 bash[34817]: cluster 2026-04-16T19:28:17.567319+0000 mgr.vm01.nwhpas (mgr.14227) 427 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:19.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:18 vm04 bash[34817]: cluster 2026-04-16T19:28:17.567319+0000 mgr.vm01.nwhpas (mgr.14227) 427 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:19.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:18 vm01 bash[28222]: cluster 2026-04-16T19:28:17.567319+0000 mgr.vm01.nwhpas (mgr.14227) 427 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:19.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:18 
vm01 bash[28222]: cluster 2026-04-16T19:28:17.567319+0000 mgr.vm01.nwhpas (mgr.14227) 427 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:21.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:20 vm04 bash[34817]: cluster 2026-04-16T19:28:19.567726+0000 mgr.vm01.nwhpas (mgr.14227) 428 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:21.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:20 vm04 bash[34817]: cluster 2026-04-16T19:28:19.567726+0000 mgr.vm01.nwhpas (mgr.14227) 428 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:21.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:20 vm01 bash[28222]: cluster 2026-04-16T19:28:19.567726+0000 mgr.vm01.nwhpas (mgr.14227) 428 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:20 vm01 bash[28222]: cluster 2026-04-16T19:28:19.567726+0000 mgr.vm01.nwhpas (mgr.14227) 428 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:21.681 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:28:21.862 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:28:21.863 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 36s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:28:21.863 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 36s ago 5m - - 2026-04-16T19:28:21.863 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 102s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:28:21.863 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 102s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:28:22.081 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:28:22.081 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:28:22.081 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: cluster 2026-04-16T19:28:21.568133+0000 mgr.vm01.nwhpas (mgr.14227) 429 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: cluster 2026-04-16T19:28:21.568133+0000 mgr.vm01.nwhpas (mgr.14227) 429 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:21.661087+0000 mgr.vm01.nwhpas (mgr.14227) 430 : audit [DBG] from='client.15364 
-' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:21.661087+0000 mgr.vm01.nwhpas (mgr.14227) 430 : audit [DBG] from='client.15364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:21.856273+0000 mgr.vm01.nwhpas (mgr.14227) 431 : audit [DBG] from='client.15368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:21.856273+0000 mgr.vm01.nwhpas (mgr.14227) 431 : audit [DBG] from='client.15368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:22.077272+0000 mon.vm01 (mon.0) 1032 : audit [DBG] from='client.? 192.168.123.101:0/2070397909' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:22.077272+0000 mon.vm01 (mon.0) 1032 : audit [DBG] from='client.? 192.168.123.101:0/2070397909' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:22.567719+0000 mon.vm01 (mon.0) 1033 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:23.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:22 vm04 bash[34817]: audit 2026-04-16T19:28:22.567719+0000 mon.vm01 (mon.0) 1033 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:23.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: cluster 2026-04-16T19:28:21.568133+0000 mgr.vm01.nwhpas (mgr.14227) 429 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:23.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: cluster 2026-04-16T19:28:21.568133+0000 mgr.vm01.nwhpas (mgr.14227) 429 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:23.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:21.661087+0000 mgr.vm01.nwhpas (mgr.14227) 430 : audit [DBG] from='client.15364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:21.661087+0000 mgr.vm01.nwhpas (mgr.14227) 430 : audit [DBG] from='client.15364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 
2026-04-16T19:28:21.856273+0000 mgr.vm01.nwhpas (mgr.14227) 431 : audit [DBG] from='client.15368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:21.856273+0000 mgr.vm01.nwhpas (mgr.14227) 431 : audit [DBG] from='client.15368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:22.077272+0000 mon.vm01 (mon.0) 1032 : audit [DBG] from='client.? 192.168.123.101:0/2070397909' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:22.077272+0000 mon.vm01 (mon.0) 1032 : audit [DBG] from='client.? 192.168.123.101:0/2070397909' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:22.567719+0000 mon.vm01 (mon.0) 1033 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:22 vm01 bash[28222]: audit 2026-04-16T19:28:22.567719+0000 mon.vm01 (mon.0) 1033 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:28:25.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:24 vm04 bash[34817]: cluster 2026-04-16T19:28:23.568541+0000 mgr.vm01.nwhpas (mgr.14227) 432 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:25.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:24 vm04 bash[34817]: cluster 2026-04-16T19:28:23.568541+0000 mgr.vm01.nwhpas (mgr.14227) 432 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:25.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:24 vm01 bash[28222]: cluster 2026-04-16T19:28:23.568541+0000 mgr.vm01.nwhpas (mgr.14227) 432 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:25.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:24 vm01 bash[28222]: cluster 2026-04-16T19:28:23.568541+0000 mgr.vm01.nwhpas (mgr.14227) 432 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:28:26.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:25 vm04 bash[34817]: cluster 2026-04-16T19:28:25.569042+0000 mgr.vm01.nwhpas (mgr.14227) 433 : cluster [DBG] pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:26.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:25 vm04 bash[34817]: cluster 2026-04-16T19:28:25.569042+0000 mgr.vm01.nwhpas (mgr.14227) 433 : cluster [DBG] pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 
2026-04-16T19:28:26.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:25 vm01 bash[28222]: cluster 2026-04-16T19:28:25.569042+0000 mgr.vm01.nwhpas (mgr.14227) 433 : cluster [DBG] pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:28:27.310 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:28:27.543 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:28:27.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 42s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:28:27.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 42s ago 5m - -
2026-04-16T19:28:27.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 108s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:28:27.543 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 108s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:28:27.787 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:28:27.787 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:28:27.787 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:28:28.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:28 vm04 bash[34817]: audit 2026-04-16T19:28:27.288613+0000 mgr.vm01.nwhpas (mgr.14227) 434 : audit [DBG] from='client.15376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:28.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:28 vm04 bash[34817]: audit 2026-04-16T19:28:27.536764+0000 mgr.vm01.nwhpas (mgr.14227) 435 : audit [DBG] from='client.15380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:28.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:28 vm04 bash[34817]: cluster 2026-04-16T19:28:27.569534+0000 mgr.vm01.nwhpas (mgr.14227) 436 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:28:28.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:28 vm04 bash[34817]: audit 2026-04-16T19:28:27.783176+0000 mon.vm01 (mon.0) 1034 : audit [DBG] from='client.? 192.168.123.101:0/3423599916' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:28.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:28 vm01 bash[28222]: audit 2026-04-16T19:28:27.288613+0000 mgr.vm01.nwhpas (mgr.14227) 434 : audit [DBG] from='client.15376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:28.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:28 vm01 bash[28222]: audit 2026-04-16T19:28:27.536764+0000 mgr.vm01.nwhpas (mgr.14227) 435 : audit [DBG] from='client.15380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:28.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:28 vm01 bash[28222]: cluster 2026-04-16T19:28:27.569534+0000 mgr.vm01.nwhpas (mgr.14227) 436 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:28:28.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:28 vm01 bash[28222]: audit 2026-04-16T19:28:27.783176+0000 mon.vm01 (mon.0) 1034 : audit [DBG] from='client.? 192.168.123.101:0/3423599916' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:30.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:30 vm04 bash[34817]: cluster 2026-04-16T19:28:29.570024+0000 mgr.vm01.nwhpas (mgr.14227) 437 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:30.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:30 vm01 bash[28222]: cluster 2026-04-16T19:28:29.570024+0000 mgr.vm01.nwhpas (mgr.14227) 437 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:32.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:32 vm04 bash[34817]: cluster 2026-04-16T19:28:31.570474+0000 mgr.vm01.nwhpas (mgr.14227) 438 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:32.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:32 vm01 bash[28222]: cluster 2026-04-16T19:28:31.570474+0000 mgr.vm01.nwhpas (mgr.14227) 438 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:33.000 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:28:33.195 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:28:33.195 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 47s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:28:33.195 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 47s ago 5m - -
2026-04-16T19:28:33.195 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 114s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:28:33.195 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 114s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:28:33.423 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:28:33.423 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:28:33.423 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:28:33.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:33 vm04 bash[34817]: audit 2026-04-16T19:28:33.419286+0000 mon.vm01 (mon.0) 1035 : audit [DBG] from='client.? 192.168.123.101:0/4251438923' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:33.961 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:33 vm01 bash[28222]: audit 2026-04-16T19:28:33.419286+0000 mon.vm01 (mon.0) 1035 : audit [DBG] from='client.? 192.168.123.101:0/4251438923' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:34.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:34 vm04 bash[34817]: audit 2026-04-16T19:28:32.982502+0000 mgr.vm01.nwhpas (mgr.14227) 439 : audit [DBG] from='client.15388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:34.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:34 vm04 bash[34817]: audit 2026-04-16T19:28:33.188605+0000 mgr.vm01.nwhpas (mgr.14227) 440 : audit [DBG] from='client.15392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:34.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:34 vm04 bash[34817]: cluster 2026-04-16T19:28:33.570865+0000 mgr.vm01.nwhpas (mgr.14227) 441 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:34.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:34 vm01 bash[28222]: audit 2026-04-16T19:28:32.982502+0000 mgr.vm01.nwhpas (mgr.14227) 439 : audit [DBG] from='client.15388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:34.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:34 vm01 bash[28222]: audit 2026-04-16T19:28:33.188605+0000 mgr.vm01.nwhpas (mgr.14227) 440 : audit [DBG] from='client.15392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:34.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:34 vm01 bash[28222]: cluster 2026-04-16T19:28:33.570865+0000 mgr.vm01.nwhpas (mgr.14227) 441 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:36.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:36 vm01 bash[28222]: cluster 2026-04-16T19:28:35.571284+0000 mgr.vm01.nwhpas (mgr.14227) 442 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:37.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:36 vm04 bash[34817]: cluster 2026-04-16T19:28:35.571284+0000 mgr.vm01.nwhpas (mgr.14227) 442 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:38.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:37 vm04 bash[34817]: audit 2026-04-16T19:28:37.567685+0000 mon.vm01 (mon.0) 1036 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:28:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:37 vm01 bash[28222]: audit 2026-04-16T19:28:37.567685+0000 mon.vm01 (mon.0) 1036 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:28:38.627 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:28:38.813 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:28:38.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 53s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:28:38.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 53s ago 5m - -
2026-04-16T19:28:38.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 119s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:28:38.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 119s ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:28:39.066 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:28:39.066 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:28:39.066 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:28:39.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:38 vm04 bash[34817]: cluster 2026-04-16T19:28:37.571632+0000 mgr.vm01.nwhpas (mgr.14227) 443 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:38 vm01 bash[28222]: cluster 2026-04-16T19:28:37.571632+0000 mgr.vm01.nwhpas (mgr.14227) 443 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:40.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:39 vm04 bash[34817]: audit 2026-04-16T19:28:38.601820+0000 mgr.vm01.nwhpas (mgr.14227) 444 : audit [DBG] from='client.15400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:40.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:39 vm04 bash[34817]: audit 2026-04-16T19:28:38.806258+0000 mgr.vm01.nwhpas (mgr.14227) 445 : audit [DBG] from='client.15404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:40.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:39 vm04 bash[34817]: audit 2026-04-16T19:28:39.062205+0000 mon.vm01 (mon.0) 1037 : audit [DBG] from='client.? 192.168.123.101:0/4075061530' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:39 vm01 bash[28222]: audit 2026-04-16T19:28:38.601820+0000 mgr.vm01.nwhpas (mgr.14227) 444 : audit [DBG] from='client.15400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:39 vm01 bash[28222]: audit 2026-04-16T19:28:38.806258+0000 mgr.vm01.nwhpas (mgr.14227) 445 : audit [DBG] from='client.15404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:39 vm01 bash[28222]: audit 2026-04-16T19:28:39.062205+0000 mon.vm01 (mon.0) 1037 : audit [DBG] from='client.? 192.168.123.101:0/4075061530' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:41.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:40 vm04 bash[34817]: cluster 2026-04-16T19:28:39.572055+0000 mgr.vm01.nwhpas (mgr.14227) 446 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:41.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:40 vm01 bash[28222]: cluster 2026-04-16T19:28:39.572055+0000 mgr.vm01.nwhpas (mgr.14227) 446 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:28:43.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:42 vm04 bash[34817]: cluster 2026-04-16T19:28:41.572476+0000 mgr.vm01.nwhpas (mgr.14227) 447 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:28:43.211 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:42 vm01 bash[28222]: cluster 2026-04-16T19:28:41.572476+0000 mgr.vm01.nwhpas (mgr.14227) 447 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:28:44.276 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:28:44.463 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:28:44.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 59s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:28:44.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 59s ago 5m - -
2026-04-16T19:28:44.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 2m ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:28:44.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:28:44.697 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:28:44.697 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:28:44.697 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:28:45.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:44 vm04 bash[34817]: cluster 2026-04-16T19:28:43.572898+0000 mgr.vm01.nwhpas (mgr.14227) 448 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:45.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:44 vm04 bash[34817]: audit 2026-04-16T19:28:44.691765+0000 mon.vm01 (mon.0) 1038 : audit [DBG] from='client.? 192.168.123.101:0/2903508059' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:45.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:44 vm01 bash[28222]: cluster 2026-04-16T19:28:43.572898+0000 mgr.vm01.nwhpas (mgr.14227) 448 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:45.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:44 vm01 bash[28222]: audit 2026-04-16T19:28:44.691765+0000 mon.vm01 (mon.0) 1038 : audit [DBG] from='client.? 192.168.123.101:0/2903508059' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:46.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:45 vm01 bash[28222]: audit 2026-04-16T19:28:44.254674+0000 mgr.vm01.nwhpas (mgr.14227) 449 : audit [DBG] from='client.15412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:46.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:45 vm01 bash[28222]: audit 2026-04-16T19:28:44.455561+0000 mgr.vm01.nwhpas (mgr.14227) 450 : audit [DBG] from='client.15416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:46.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:45 vm01 bash[28222]: audit 2026-04-16T19:28:45.581386+0000 mon.vm01 (mon.0) 1039 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:28:46.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:45 vm04 bash[34817]: audit 2026-04-16T19:28:44.254674+0000 mgr.vm01.nwhpas (mgr.14227) 449 : audit [DBG] from='client.15412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:46.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:45 vm04 bash[34817]: audit 2026-04-16T19:28:44.455561+0000 mgr.vm01.nwhpas (mgr.14227) 450 : audit [DBG] from='client.15416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:46.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:45 vm04 bash[34817]: audit 2026-04-16T19:28:45.581386+0000 mon.vm01 (mon.0) 1039 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:28:47.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:46 vm04 bash[34817]: cluster 2026-04-16T19:28:45.573339+0000 mgr.vm01.nwhpas (mgr.14227) 451 : cluster [DBG] pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:47.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:46 vm01 bash[28222]: cluster 2026-04-16T19:28:45.573339+0000 mgr.vm01.nwhpas (mgr.14227) 451 : cluster [DBG] pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:48.458 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:47 vm04 bash[34817]: cluster 2026-04-16T19:28:47.573746+0000 mgr.vm01.nwhpas (mgr.14227) 452 : cluster [DBG] pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:48.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:47 vm01 bash[28222]: cluster 2026-04-16T19:28:47.573746+0000 mgr.vm01.nwhpas (mgr.14227) 452 : cluster [DBG] pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:49.915 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:28:50.112 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:28:50.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (4m) 64s ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:28:50.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 64s ago 5m - -
2026-04-16T19:28:50.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 2m ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:28:50.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:28:50.355 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:28:50.355 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:28:50.355 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:28:50.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:50 vm04 bash[34817]: cluster 2026-04-16T19:28:49.574168+0000 mgr.vm01.nwhpas (mgr.14227) 453 : cluster [DBG] pgmap v242: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:50.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:50 vm04 bash[34817]: audit 2026-04-16T19:28:50.350662+0000 mon.vm01 (mon.0) 1040 : audit [DBG] from='client.? 192.168.123.101:0/4150959666' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:50.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:50 vm01 bash[28222]: cluster 2026-04-16T19:28:49.574168+0000 mgr.vm01.nwhpas (mgr.14227) 453 : cluster [DBG] pgmap v242: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:28:50.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:50 vm01 bash[28222]: audit 2026-04-16T19:28:50.350662+0000 mon.vm01 (mon.0) 1040 : audit [DBG] from='client.? 192.168.123.101:0/4150959666' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:28:51.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:49.894716+0000 mgr.vm01.nwhpas (mgr.14227) 454 : audit [DBG] from='client.15424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:51.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:50.104783+0000 mgr.vm01.nwhpas (mgr.14227) 455 : audit [DBG] from='client.15428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:51.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:50.903305+0000 mon.vm01 (mon.0) 1041 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:28:51.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:50.908386+0000 mon.vm01 (mon.0) 1042 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:28:51.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:51.237136+0000 mon.vm01 (mon.0) 1043 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:28:51.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:51.237657+0000 mon.vm01 (mon.0) 1044 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:28:51.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:51.242563+0000 mon.vm01 (mon.0) 1045 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:28:51.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:51 vm04 bash[34817]: audit 2026-04-16T19:28:51.244053+0000 mon.vm01 (mon.0) 1046 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:49.894716+0000 mgr.vm01.nwhpas (mgr.14227) 454 : audit [DBG] from='client.15424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:50.104783+0000 mgr.vm01.nwhpas (mgr.14227) 455 : audit [DBG] from='client.15428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:50.903305+0000 mon.vm01 (mon.0) 1041 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:50.908386+0000 mon.vm01 (mon.0) 1042 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:51.237136+0000 mon.vm01 (mon.0) 1043 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:51.237657+0000 mon.vm01 (mon.0) 1044 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:51.242563+0000 mon.vm01 (mon.0) 1045 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:28:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:51 vm01 bash[28222]: audit 2026-04-16T19:28:51.244053+0000 mon.vm01 (mon.0) 1046 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:28:52.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:52 vm01 bash[28222]: cluster 2026-04-16T19:28:51.238682+0000 mgr.vm01.nwhpas (mgr.14227) 456 : cluster [DBG] pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s
2026-04-16T19:28:52.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:52 vm01 bash[28222]: audit 2026-04-16T19:28:52.567961+0000 mon.vm01 (mon.0) 1047 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:28:52.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:52 vm04 bash[34817]: cluster 2026-04-16T19:28:51.238682+0000 mgr.vm01.nwhpas (mgr.14227) 456 : cluster [DBG] pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s
2026-04-16T19:28:52.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:52 vm04 bash[34817]: audit 2026-04-16T19:28:52.567961+0000 mon.vm01 (mon.0) 1047 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:28:54.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:54 vm04 bash[34817]: cluster 2026-04-16T19:28:53.239149+0000 mgr.vm01.nwhpas (mgr.14227) 457 : cluster [DBG] pgmap v244: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s
2026-04-16T19:28:54.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:54 vm01 bash[28222]: cluster 2026-04-16T19:28:53.239149+0000 mgr.vm01.nwhpas (mgr.14227) 457 : cluster [DBG] pgmap v244: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 175 B/s rd, 351 B/s wr, 0 op/s
2026-04-16T19:28:55.565 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:28:55.745 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:28:55.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 4s ago 5m 115M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:28:55.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 4s ago 5m - -
2026-04-16T19:28:55.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 2m ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:28:55.745 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:28:55.969 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:28:55.969 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:28:55.969 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
error state 2026-04-16T19:28:56.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: cluster 2026-04-16T19:28:55.239588+0000 mgr.vm01.nwhpas (mgr.14227) 458 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:56.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: cluster 2026-04-16T19:28:55.239588+0000 mgr.vm01.nwhpas (mgr.14227) 458 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:56.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: audit 2026-04-16T19:28:55.545215+0000 mgr.vm01.nwhpas (mgr.14227) 459 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: audit 2026-04-16T19:28:55.545215+0000 mgr.vm01.nwhpas (mgr.14227) 459 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: audit 2026-04-16T19:28:55.738459+0000 mgr.vm01.nwhpas (mgr.14227) 460 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: audit 2026-04-16T19:28:55.738459+0000 mgr.vm01.nwhpas (mgr.14227) 460 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: audit 2026-04-16T19:28:55.965461+0000 mon.vm01 (mon.0) 1048 : audit [DBG] from='client.? 192.168.123.101:0/1853296729' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:56.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:56 vm04 bash[34817]: audit 2026-04-16T19:28:55.965461+0000 mon.vm01 (mon.0) 1048 : audit [DBG] from='client.? 
192.168.123.101:0/1853296729' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: cluster 2026-04-16T19:28:55.239588+0000 mgr.vm01.nwhpas (mgr.14227) 458 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: cluster 2026-04-16T19:28:55.239588+0000 mgr.vm01.nwhpas (mgr.14227) 458 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: audit 2026-04-16T19:28:55.545215+0000 mgr.vm01.nwhpas (mgr.14227) 459 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: audit 2026-04-16T19:28:55.545215+0000 mgr.vm01.nwhpas (mgr.14227) 459 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: audit 2026-04-16T19:28:55.738459+0000 mgr.vm01.nwhpas (mgr.14227) 460 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: audit 2026-04-16T19:28:55.738459+0000 mgr.vm01.nwhpas (mgr.14227) 460 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: audit 2026-04-16T19:28:55.965461+0000 mon.vm01 (mon.0) 1048 : audit [DBG] from='client.? 192.168.123.101:0/1853296729' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:56 vm01 bash[28222]: audit 2026-04-16T19:28:55.965461+0000 mon.vm01 (mon.0) 1048 : audit [DBG] from='client.? 
192.168.123.101:0/1853296729' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:28:58.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:58 vm04 bash[34817]: cluster 2026-04-16T19:28:57.239977+0000 mgr.vm01.nwhpas (mgr.14227) 461 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:58.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:28:58 vm04 bash[34817]: cluster 2026-04-16T19:28:57.239977+0000 mgr.vm01.nwhpas (mgr.14227) 461 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:58.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:58 vm01 bash[28222]: cluster 2026-04-16T19:28:57.239977+0000 mgr.vm01.nwhpas (mgr.14227) 461 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:28:58.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:28:58 vm01 bash[28222]: cluster 2026-04-16T19:28:57.239977+0000 mgr.vm01.nwhpas (mgr.14227) 461 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:29:00.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:00 vm04 bash[34817]: cluster 2026-04-16T19:28:59.240343+0000 mgr.vm01.nwhpas (mgr.14227) 462 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-16T19:29:00.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:00 vm04 bash[34817]: cluster 2026-04-16T19:28:59.240343+0000 mgr.vm01.nwhpas (mgr.14227) 462 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-16T19:29:00.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:00 vm01 bash[28222]: cluster 2026-04-16T19:28:59.240343+0000 mgr.vm01.nwhpas (mgr.14227) 462 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-16T19:29:00.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:00 vm01 bash[28222]: cluster 2026-04-16T19:28:59.240343+0000 mgr.vm01.nwhpas (mgr.14227) 462 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-16T19:29:01.168 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop 2026-04-16T19:29:01.335 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:29:01.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 10s ago 5m 115M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:29:01.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 10s ago 5m - - 2026-04-16T19:29:01.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 2m ago 5m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:29:01.335 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:29:01.553 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:29:01.553 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] 
CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:29:01.553 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state 2026-04-16T19:29:01.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:01 vm04 bash[34817]: audit 2026-04-16T19:29:01.548751+0000 mon.vm01 (mon.0) 1049 : audit [DBG] from='client.? 192.168.123.101:0/1399500876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:01.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:01 vm04 bash[34817]: audit 2026-04-16T19:29:01.548751+0000 mon.vm01 (mon.0) 1049 : audit [DBG] from='client.? 192.168.123.101:0/1399500876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:01.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:01 vm01 bash[28222]: audit 2026-04-16T19:29:01.548751+0000 mon.vm01 (mon.0) 1049 : audit [DBG] from='client.? 192.168.123.101:0/1399500876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:01.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:01 vm01 bash[28222]: audit 2026-04-16T19:29:01.548751+0000 mon.vm01 (mon.0) 1049 : audit [DBG] from='client.? 192.168.123.101:0/1399500876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:02.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:02 vm04 bash[34817]: audit 2026-04-16T19:29:01.147588+0000 mgr.vm01.nwhpas (mgr.14227) 463 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:02.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:02 vm04 bash[34817]: audit 2026-04-16T19:29:01.147588+0000 mgr.vm01.nwhpas (mgr.14227) 463 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:02.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:02 vm04 bash[34817]: cluster 2026-04-16T19:29:01.240731+0000 mgr.vm01.nwhpas (mgr.14227) 464 : cluster [DBG] pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-16T19:29:02.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:02 vm04 bash[34817]: cluster 2026-04-16T19:29:01.240731+0000 mgr.vm01.nwhpas (mgr.14227) 464 : cluster [DBG] pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s 2026-04-16T19:29:02.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:02 vm04 bash[34817]: audit 2026-04-16T19:29:01.328804+0000 mgr.vm01.nwhpas (mgr.14227) 465 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:02.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:02 vm04 bash[34817]: audit 2026-04-16T19:29:01.328804+0000 mgr.vm01.nwhpas (mgr.14227) 465 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:02 vm01 bash[28222]: audit 2026-04-16T19:29:01.147588+0000 mgr.vm01.nwhpas (mgr.14227) 463 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:02.962 
2026-04-16T19:29:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:02 vm01 bash[28222]: cluster 2026-04-16T19:29:01.240731+0000 mgr.vm01.nwhpas (mgr.14227) 464 : cluster [DBG] pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 87 B/s rd, 175 B/s wr, 0 op/s
2026-04-16T19:29:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:02 vm01 bash[28222]: audit 2026-04-16T19:29:01.328804+0000 mgr.vm01.nwhpas (mgr.14227) 465 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:04.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:04 vm01 bash[28222]: cluster 2026-04-16T19:29:03.241157+0000 mgr.vm01.nwhpas (mgr.14227) 466 : cluster [DBG] pgmap v249: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:04.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:04 vm04 bash[34817]: cluster 2026-04-16T19:29:03.241157+0000 mgr.vm01.nwhpas (mgr.14227) 466 : cluster [DBG] pgmap v249: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:06.757 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to stop
2026-04-16T19:29:06.930 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:06.930 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 16s ago 6m 115M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:06.930 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 16s ago 6m - -
2026-04-16T19:29:06.930 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (6m) 2m ago 6m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:29:06.930 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:06.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:06 vm04 bash[34817]: cluster 2026-04-16T19:29:05.241521+0000 mgr.vm01.nwhpas (mgr.14227) 467 : cluster [DBG] pgmap v250: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:06 vm01 bash[28222]: cluster 2026-04-16T19:29:05.241521+0000 mgr.vm01.nwhpas (mgr.14227) 467 : cluster [DBG] pgmap v250: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:07.104 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:29:07.104 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:29:07.105 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-16T19:29:07.301 INFO:teuthology.orchestra.run.vm01.stdout:anonymous
2026-04-16T19:29:07.301 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to start rgw.foo.vm01.qgurbb on host 'vm01'
2026-04-16T19:29:07.506 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to start
2026-04-16T19:29:07.698 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:07.698 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 16s ago 6m 115M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:07.698 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 16s ago 6m - -
2026-04-16T19:29:07.698 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (6m) 2m ago 6m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:29:07.698 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:07.918 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:29:07.918 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:29:07.918 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:29:07.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:07 vm04 bash[34817]: audit 2026-04-16T19:29:06.737097+0000 mgr.vm01.nwhpas (mgr.14227) 468 : audit [DBG] from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:07.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:07 vm04 bash[34817]: audit 2026-04-16T19:29:07.278160+0000 mon.vm01 (mon.0) 1050 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:07.958 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:07 vm04 bash[34817]: audit 2026-04-16T19:29:07.296331+0000 mon.vm01 (mon.0) 1051 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:07.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:07 vm04 bash[34817]: audit 2026-04-16T19:29:07.297241+0000 mon.vm01 (mon.0) 1052 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:29:07.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:07 vm04 bash[34817]: audit 2026-04-16T19:29:07.568141+0000 mon.vm01 (mon.0) 1053 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:29:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:07 vm01 bash[28222]: audit 2026-04-16T19:29:06.737097+0000 mgr.vm01.nwhpas (mgr.14227) 468 : audit [DBG] from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:07 vm01 bash[28222]: audit 2026-04-16T19:29:07.278160+0000 mon.vm01 (mon.0) 1050 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:07 vm01 bash[28222]: audit 2026-04-16T19:29:07.296331+0000 mon.vm01 (mon.0) 1051 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:07 vm01 bash[28222]: audit 2026-04-16T19:29:07.297241+0000 mon.vm01 (mon.0) 1052 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:29:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:07 vm01 bash[28222]: audit 2026-04-16T19:29:07.568141+0000 mon.vm01 (mon.0) 1053 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: audit 2026-04-16T19:29:06.923922+0000 mgr.vm01.nwhpas (mgr.14227) 469 : audit [DBG] from='client.15464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: cluster 2026-04-16T19:29:07.241897+0000 mgr.vm01.nwhpas (mgr.14227) 470 : cluster [DBG] pgmap v251: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: audit 2026-04-16T19:29:07.271422+0000 mgr.vm01.nwhpas (mgr.14227) 471 : audit [DBG] from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm01.qgurbb", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: cephadm 2026-04-16T19:29:07.271808+0000 mgr.vm01.nwhpas (mgr.14227) 472 : cephadm [INF] Schedule start daemon rgw.foo.vm01.qgurbb
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: audit 2026-04-16T19:29:07.485767+0000 mgr.vm01.nwhpas (mgr.14227) 473 : audit [DBG] from='client.15476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: audit 2026-04-16T19:29:07.691459+0000 mgr.vm01.nwhpas (mgr.14227) 474 : audit [DBG] from='client.15480 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:08 vm01 bash[28222]: audit 2026-04-16T19:29:07.914346+0000 mon.vm01 (mon.0) 1054 : audit [DBG] from='client.? 192.168.123.101:0/3428296417' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:29:09.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: audit 2026-04-16T19:29:06.923922+0000 mgr.vm01.nwhpas (mgr.14227) 469 : audit [DBG] from='client.15464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:09.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: cluster 2026-04-16T19:29:07.241897+0000 mgr.vm01.nwhpas (mgr.14227) 470 : cluster [DBG] pgmap v251: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:09.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: audit 2026-04-16T19:29:07.271422+0000 mgr.vm01.nwhpas (mgr.14227) 471 : audit [DBG] from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm01.qgurbb", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:09.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: cephadm 2026-04-16T19:29:07.271808+0000 mgr.vm01.nwhpas (mgr.14227) 472 : cephadm [INF] Schedule start daemon rgw.foo.vm01.qgurbb
2026-04-16T19:29:09.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: audit 2026-04-16T19:29:07.485767+0000 mgr.vm01.nwhpas (mgr.14227) 473 : audit [DBG] from='client.15476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:09.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: audit 2026-04-16T19:29:07.691459+0000 mgr.vm01.nwhpas (mgr.14227) 474 : audit [DBG] from='client.15480 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:09.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:08 vm04 bash[34817]: audit 2026-04-16T19:29:07.914346+0000 mon.vm01 (mon.0) 1054 : audit [DBG] from='client.? 192.168.123.101:0/3428296417' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:29:11.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:10 vm04 bash[34817]: cluster 2026-04-16T19:29:09.242303+0000 mgr.vm01.nwhpas (mgr.14227) 475 : cluster [DBG] pgmap v252: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:11.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:10 vm01 bash[28222]: cluster 2026-04-16T19:29:09.242303+0000 mgr.vm01.nwhpas (mgr.14227) 475 : cluster [DBG] pgmap v252: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:13.033 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: cluster 2026-04-16T19:29:11.242697+0000 mgr.vm01.nwhpas (mgr.14227) 476 : cluster [DBG] pgmap v253: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:13.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: audit 2026-04-16T19:29:12.524238+0000 mon.vm01 (mon.0) 1055 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:13.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: audit 2026-04-16T19:29:12.528779+0000 mon.vm01 (mon.0) 1056 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:13.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: audit 2026-04-16T19:29:12.529507+0000 mon.vm01 (mon.0) 1057 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:29:13.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: audit 2026-04-16T19:29:12.529943+0000 mon.vm01 (mon.0) 1058 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:29:13.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: audit 2026-04-16T19:29:12.533670+0000 mon.vm01 (mon.0) 1059 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:13.034 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:12 vm01 bash[28222]: audit 2026-04-16T19:29:12.534918+0000 mon.vm01 (mon.0) 1060 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:29:13.139 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm01.qgurbb to start
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: cluster 2026-04-16T19:29:11.242697+0000 mgr.vm01.nwhpas (mgr.14227) 476 : cluster [DBG] pgmap v253: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: audit 2026-04-16T19:29:12.524238+0000 mon.vm01 (mon.0) 1055 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: audit 2026-04-16T19:29:12.528779+0000 mon.vm01 (mon.0) 1056 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: audit 2026-04-16T19:29:12.529507+0000 mon.vm01 (mon.0) 1057 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: audit 2026-04-16T19:29:12.529943+0000 mon.vm01 (mon.0) 1058 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: audit 2026-04-16T19:29:12.533670+0000 mon.vm01 (mon.0) 1059 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:12 vm04 bash[34817]: audit 2026-04-16T19:29:12.534918+0000 mon.vm01 (mon.0) 1060 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:29:13.359 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:13.359 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 0s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:13.359 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 error 0s ago 6m - -
2026-04-16T19:29:13.359 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (6m) 2m ago 6m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:29:13.359 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:13.743 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:29:13.743 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:29:13.743 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm01.qgurbb on vm01 is in error state
2026-04-16T19:29:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:13 vm04 bash[34817]: cluster 2026-04-16T19:29:12.530690+0000 mgr.vm01.nwhpas (mgr.14227) 477 : cluster [DBG] pgmap v254: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:13 vm04 bash[34817]: cluster 2026-04-16T19:29:12.530773+0000 mgr.vm01.nwhpas (mgr.14227) 478 : cluster [DBG] pgmap v255: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:13 vm04 bash[34817]: audit 2026-04-16T19:29:13.051319+0000 mon.vm01 (mon.0) 1061 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:13 vm04 bash[34817]: audit 2026-04-16T19:29:13.056010+0000 mon.vm01 (mon.0) 1062 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:13 vm04 bash[34817]: audit 2026-04-16T19:29:13.057619+0000 mon.vm01 (mon.0) 1063 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:29:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:13 vm01 bash[28222]: cluster 2026-04-16T19:29:12.530690+0000 mgr.vm01.nwhpas (mgr.14227) 477 : cluster [DBG] pgmap v254: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:13 vm01 bash[28222]: cluster 2026-04-16T19:29:12.530773+0000 mgr.vm01.nwhpas (mgr.14227) 478 : cluster [DBG] pgmap v255: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:13 vm01 bash[28222]: audit 2026-04-16T19:29:13.051319+0000 mon.vm01 (mon.0) 1061 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:13 vm01 bash[28222]: audit 2026-04-16T19:29:13.056010+0000 mon.vm01 (mon.0) 1062 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:13 vm01 bash[28222]: audit 2026-04-16T19:29:13.057619+0000 mon.vm01 (mon.0) 1063 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:29:15.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:14 vm04 bash[34817]: audit 2026-04-16T19:29:13.103346+0000 mgr.vm01.nwhpas (mgr.14227) 479 : audit [DBG] from='client.15488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:15.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:14 vm04 bash[34817]: audit 2026-04-16T19:29:13.349886+0000 mgr.vm01.nwhpas (mgr.14227) 480 : audit [DBG] from='client.15492 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:15.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:14 vm04 bash[34817]: audit 2026-04-16T19:29:13.739111+0000 mon.vm01 (mon.0) 1064 : audit [DBG] from='client.? 192.168.123.101:0/2058142690' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:29:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:14 vm01 bash[28222]: audit 2026-04-16T19:29:13.103346+0000 mgr.vm01.nwhpas (mgr.14227) 479 : audit [DBG] from='client.15488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:14 vm01 bash[28222]: audit 2026-04-16T19:29:13.349886+0000 mgr.vm01.nwhpas (mgr.14227) 480 : audit [DBG] from='client.15492 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:14 vm01 bash[28222]: audit 2026-04-16T19:29:13.739111+0000 mon.vm01 (mon.0) 1064 : audit [DBG] from='client.? 192.168.123.101:0/2058142690' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:29:16.208 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:15 vm04 bash[34817]: cluster 2026-04-16T19:29:14.531256+0000 mgr.vm01.nwhpas (mgr.14227) 481 : cluster [DBG] pgmap v256: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 440 B/s wr, 44 op/s
2026-04-16T19:29:16.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:15 vm01 bash[28222]: cluster 2026-04-16T19:29:14.531256+0000 mgr.vm01.nwhpas (mgr.14227) 481 : cluster [DBG] pgmap v256: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 440 B/s wr, 44 op/s
2026-04-16T19:29:18.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:17 vm04 bash[34817]: cluster 2026-04-16T19:29:16.531657+0000 mgr.vm01.nwhpas (mgr.14227) 482 : cluster [DBG] pgmap v257: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 440 B/s wr, 110 op/s
2026-04-16T19:29:18.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:17 vm01 bash[28222]: cluster 2026-04-16T19:29:16.531657+0000 mgr.vm01.nwhpas (mgr.14227) 482 : cluster [DBG] pgmap v257: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 440 B/s wr, 110 op/s
2026-04-16T19:29:18.946 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5s) 0s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:29:19.132 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop rgw.foo.vm04.rpimxa on host 'vm04'
2026-04-16T19:29:19.338 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:29:19.524 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:19.524 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 1s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:19.524 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6s) 1s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:29:19.524 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (6m) 2m ago 6m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe
2026-04-16T19:29:19.524 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:18.305989+0000 mon.vm01 (mon.0) 1065 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:18.310516+0000 mon.vm01 (mon.0) 1066 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:18.311242+0000 mon.vm01 (mon.0) 1067 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:18.311658+0000 mon.vm01 (mon.0) 1068 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: cluster 2026-04-16T19:29:18.312472+0000 mgr.vm01.nwhpas (mgr.14227) 483 : cluster [DBG] pgmap v258: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 451 B/s wr, 144 op/s
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:18.314680+0000 mon.vm01 (mon.0) 1069 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:18.315788+0000 mon.vm01 (mon.0) 1070 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.123467+0000 mon.vm01 (mon.0) 1071 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.127276+0000 mon.vm01 (mon.0) 1072 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.127957+0000 mon.vm01 (mon.0) 1073 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.128946+0000 mon.vm01 (mon.0) 1074 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.129344+0000 mon.vm01 (mon.0) 1075 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.132430+0000 mon.vm01 (mon.0) 1076 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:19 vm04 bash[34817]: audit 2026-04-16T19:29:19.133666+0000 mon.vm01 (mon.0) 1077 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:18.305989+0000 mon.vm01 (mon.0) 1065 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:18.310516+0000 mon.vm01 (mon.0) 1066 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:18.311242+0000 mon.vm01 (mon.0) 1067 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:18.311658+0000 mon.vm01 (mon.0) 1068 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: cluster 2026-04-16T19:29:18.312472+0000 mgr.vm01.nwhpas (mgr.14227) 483 : cluster [DBG] pgmap v258: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 451 B/s wr, 144 op/s
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:18.314680+0000 mon.vm01 (mon.0) 1069 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:18.315788+0000 mon.vm01 (mon.0) 1070 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.123467+0000 mon.vm01 (mon.0) 1071 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.127276+0000 mon.vm01 (mon.0) 1072 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.127957+0000 mon.vm01 (mon.0) 1073 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:29:19.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.128946+0000 mon.vm01 (mon.0) 1074 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:29:19.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.129344+0000 mon.vm01 (mon.0) 1075 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:29:19.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.132430+0000 mon.vm01 (mon.0) 1076 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:29:19.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:19 vm01 bash[28222]: audit 2026-04-16T19:29:19.133666+0000 mon.vm01 (mon.0) 1077 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:29:19.742 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: audit 2026-04-16T19:29:18.925637+0000 mgr.vm01.nwhpas (mgr.14227) 484 : audit [DBG] from='client.15512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: audit 2026-04-16T19:29:19.117618+0000 mgr.vm01.nwhpas (mgr.14227) 485 : audit [DBG] from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: cephadm 2026-04-16T19:29:19.117939+0000 mgr.vm01.nwhpas (mgr.14227) 486 : cephadm [INF] Schedule stop daemon rgw.foo.vm04.rpimxa
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: cluster 2026-04-16T19:29:19.309471+0000 mon.vm01 (mon.0) 1078 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: cluster 2026-04-16T19:29:19.309485+0000 mon.vm01 (mon.0) 1079 : cluster [INF] Cluster is now healthy
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: audit 2026-04-16T19:29:19.317830+0000 mgr.vm01.nwhpas (mgr.14227) 487 : audit [DBG] from='client.15520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: audit 2026-04-16T19:29:19.517367+0000 mgr.vm01.nwhpas (mgr.14227) 488 : audit [DBG] from='client.15524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: audit 2026-04-16T19:29:19.737776+0000 mon.vm01 (mon.0) 1080 : audit [DBG] from='client.? 192.168.123.101:0/2552779199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:29:20.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:20 vm04 bash[34817]: audit 2026-04-16T19:29:19.737776+0000 mon.vm01 (mon.0) 1080 : audit [DBG] from='client.?
192.168.123.101:0/2552779199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:18.925637+0000 mgr.vm01.nwhpas (mgr.14227) 484 : audit [DBG] from='client.15512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:18.925637+0000 mgr.vm01.nwhpas (mgr.14227) 484 : audit [DBG] from='client.15512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.117618+0000 mgr.vm01.nwhpas (mgr.14227) 485 : audit [DBG] from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.117618+0000 mgr.vm01.nwhpas (mgr.14227) 485 : audit [DBG] from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: cephadm 2026-04-16T19:29:19.117939+0000 mgr.vm01.nwhpas (mgr.14227) 486 : cephadm [INF] Schedule stop daemon rgw.foo.vm04.rpimxa 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: cephadm 2026-04-16T19:29:19.117939+0000 mgr.vm01.nwhpas (mgr.14227) 486 : cephadm [INF] Schedule stop daemon rgw.foo.vm04.rpimxa 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: cluster 2026-04-16T19:29:19.309471+0000 mon.vm01 (mon.0) 1078 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: cluster 2026-04-16T19:29:19.309471+0000 mon.vm01 (mon.0) 1078 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: cluster 2026-04-16T19:29:19.309485+0000 mon.vm01 (mon.0) 1079 : cluster [INF] Cluster is now healthy 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: cluster 2026-04-16T19:29:19.309485+0000 mon.vm01 (mon.0) 1079 : cluster [INF] Cluster is now healthy 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.317830+0000 mgr.vm01.nwhpas (mgr.14227) 487 : audit [DBG] from='client.15520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.317830+0000 mgr.vm01.nwhpas (mgr.14227) 487 : audit [DBG] from='client.15520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.517367+0000 mgr.vm01.nwhpas (mgr.14227) 488 : audit [DBG] from='client.15524 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.517367+0000 mgr.vm01.nwhpas (mgr.14227) 488 : audit [DBG] from='client.15524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.737776+0000 mon.vm01 (mon.0) 1080 : audit [DBG] from='client.? 192.168.123.101:0/2552779199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:20.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:20 vm01 bash[28222]: audit 2026-04-16T19:29:19.737776+0000 mon.vm01 (mon.0) 1080 : audit [DBG] from='client.? 192.168.123.101:0/2552779199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:21.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:21 vm04 bash[34817]: cluster 2026-04-16T19:29:20.312822+0000 mgr.vm01.nwhpas (mgr.14227) 489 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 451 B/s wr, 144 op/s 2026-04-16T19:29:21.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:21 vm04 bash[34817]: cluster 2026-04-16T19:29:20.312822+0000 mgr.vm01.nwhpas (mgr.14227) 489 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 451 B/s wr, 144 op/s 2026-04-16T19:29:21.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:21 vm01 bash[28222]: cluster 2026-04-16T19:29:20.312822+0000 mgr.vm01.nwhpas (mgr.14227) 489 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 451 B/s wr, 144 op/s 2026-04-16T19:29:21.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:21 vm01 bash[28222]: cluster 2026-04-16T19:29:20.312822+0000 mgr.vm01.nwhpas (mgr.14227) 489 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 88 KiB/s rd, 451 B/s wr, 144 op/s 2026-04-16T19:29:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:23 vm04 bash[34817]: cluster 2026-04-16T19:29:22.313164+0000 mgr.vm01.nwhpas (mgr.14227) 490 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 82 KiB/s rd, 418 B/s wr, 134 op/s 2026-04-16T19:29:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:23 vm04 bash[34817]: cluster 2026-04-16T19:29:22.313164+0000 mgr.vm01.nwhpas (mgr.14227) 490 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 82 KiB/s rd, 418 B/s wr, 134 op/s 2026-04-16T19:29:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:23 vm04 bash[34817]: audit 2026-04-16T19:29:22.571530+0000 mon.vm01 (mon.0) 1081 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:23 vm04 bash[34817]: audit 2026-04-16T19:29:22.571530+0000 mon.vm01 (mon.0) 1081 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:23 vm04 bash[34817]: audit 2026-04-16T19:29:22.572072+0000 mon.vm01 (mon.0) 1082 : 
audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:23 vm04 bash[34817]: audit 2026-04-16T19:29:22.572072+0000 mon.vm01 (mon.0) 1082 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:23 vm01 bash[28222]: cluster 2026-04-16T19:29:22.313164+0000 mgr.vm01.nwhpas (mgr.14227) 490 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 82 KiB/s rd, 418 B/s wr, 134 op/s 2026-04-16T19:29:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:23 vm01 bash[28222]: cluster 2026-04-16T19:29:22.313164+0000 mgr.vm01.nwhpas (mgr.14227) 490 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 82 KiB/s rd, 418 B/s wr, 134 op/s 2026-04-16T19:29:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:23 vm01 bash[28222]: audit 2026-04-16T19:29:22.571530+0000 mon.vm01 (mon.0) 1081 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:23 vm01 bash[28222]: audit 2026-04-16T19:29:22.571530+0000 mon.vm01 (mon.0) 1081 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:23 vm01 bash[28222]: audit 2026-04-16T19:29:22.572072+0000 mon.vm01 (mon.0) 1082 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:23 vm01 bash[28222]: audit 2026-04-16T19:29:22.572072+0000 mon.vm01 (mon.0) 1082 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:24.931 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:29:25.103 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:29:25.103 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 6s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:29:25.103 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (12s) 6s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:29:25.103 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (6m) 2m ago 6m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:29:25.103 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:29:25.326 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:29:25.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:25 vm04 bash[34817]: cluster 2026-04-16T19:29:24.313545+0000 mgr.vm01.nwhpas (mgr.14227) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 521 B/s wr, 111 op/s 
2026-04-16T19:29:25.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:25 vm04 bash[34817]: cluster 2026-04-16T19:29:24.313545+0000 mgr.vm01.nwhpas (mgr.14227) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 521 B/s wr, 111 op/s 2026-04-16T19:29:25.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:25 vm04 bash[34817]: audit 2026-04-16T19:29:25.321851+0000 mon.vm01 (mon.0) 1083 : audit [DBG] from='client.? 192.168.123.101:0/438371006' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:25.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:25 vm04 bash[34817]: audit 2026-04-16T19:29:25.321851+0000 mon.vm01 (mon.0) 1083 : audit [DBG] from='client.? 192.168.123.101:0/438371006' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:25.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:25 vm01 bash[28222]: cluster 2026-04-16T19:29:24.313545+0000 mgr.vm01.nwhpas (mgr.14227) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 521 B/s wr, 111 op/s 2026-04-16T19:29:25.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:25 vm01 bash[28222]: cluster 2026-04-16T19:29:24.313545+0000 mgr.vm01.nwhpas (mgr.14227) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 521 B/s wr, 111 op/s 2026-04-16T19:29:25.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:25 vm01 bash[28222]: audit 2026-04-16T19:29:25.321851+0000 mon.vm01 (mon.0) 1083 : audit [DBG] from='client.? 192.168.123.101:0/438371006' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:25.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:25 vm01 bash[28222]: audit 2026-04-16T19:29:25.321851+0000 mon.vm01 (mon.0) 1083 : audit [DBG] from='client.? 
192.168.123.101:0/438371006' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:26.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:26 vm04 bash[34817]: audit 2026-04-16T19:29:24.911879+0000 mgr.vm01.nwhpas (mgr.14227) 492 : audit [DBG] from='client.15532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:26 vm04 bash[34817]: audit 2026-04-16T19:29:24.911879+0000 mgr.vm01.nwhpas (mgr.14227) 492 : audit [DBG] from='client.15532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:26 vm04 bash[34817]: audit 2026-04-16T19:29:25.096369+0000 mgr.vm01.nwhpas (mgr.14227) 493 : audit [DBG] from='client.15536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:26 vm04 bash[34817]: audit 2026-04-16T19:29:25.096369+0000 mgr.vm01.nwhpas (mgr.14227) 493 : audit [DBG] from='client.15536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:26 vm01 bash[28222]: audit 2026-04-16T19:29:24.911879+0000 mgr.vm01.nwhpas (mgr.14227) 492 : audit [DBG] from='client.15532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:26 vm01 bash[28222]: audit 2026-04-16T19:29:24.911879+0000 mgr.vm01.nwhpas (mgr.14227) 492 : audit [DBG] from='client.15532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:26 vm01 bash[28222]: audit 2026-04-16T19:29:25.096369+0000 mgr.vm01.nwhpas (mgr.14227) 493 : audit [DBG] from='client.15536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:26.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:26 vm01 bash[28222]: audit 2026-04-16T19:29:25.096369+0000 mgr.vm01.nwhpas (mgr.14227) 493 : audit [DBG] from='client.15536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:27.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:27 vm04 bash[34817]: cluster 2026-04-16T19:29:26.313936+0000 mgr.vm01.nwhpas (mgr.14227) 494 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 173 B/s wr, 76 op/s 2026-04-16T19:29:27.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:27 vm04 bash[34817]: cluster 2026-04-16T19:29:26.313936+0000 mgr.vm01.nwhpas (mgr.14227) 494 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 173 B/s wr, 76 op/s 2026-04-16T19:29:27.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:27 vm01 bash[28222]: cluster 2026-04-16T19:29:26.313936+0000 mgr.vm01.nwhpas (mgr.14227) 494 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 173 B/s wr, 76 op/s 2026-04-16T19:29:27.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 
19:29:27 vm01 bash[28222]: cluster 2026-04-16T19:29:26.313936+0000 mgr.vm01.nwhpas (mgr.14227) 494 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 173 B/s wr, 76 op/s 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: cluster 2026-04-16T19:29:28.314266+0000 mgr.vm01.nwhpas (mgr.14227) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 347 B/s wr, 24 op/s 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: cluster 2026-04-16T19:29:28.314266+0000 mgr.vm01.nwhpas (mgr.14227) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 347 B/s wr, 24 op/s 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: audit 2026-04-16T19:29:29.564646+0000 mon.vm01 (mon.0) 1084 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: audit 2026-04-16T19:29:29.564646+0000 mon.vm01 (mon.0) 1084 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: audit 2026-04-16T19:29:29.568592+0000 mon.vm01 (mon.0) 1085 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: audit 2026-04-16T19:29:29.568592+0000 mon.vm01 (mon.0) 1085 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: audit 2026-04-16T19:29:29.569525+0000 mon.vm01 (mon.0) 1086 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:29:29.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:29 vm04 bash[34817]: audit 2026-04-16T19:29:29.569525+0000 mon.vm01 (mon.0) 1086 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: cluster 2026-04-16T19:29:28.314266+0000 mgr.vm01.nwhpas (mgr.14227) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 347 B/s wr, 24 op/s 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: cluster 2026-04-16T19:29:28.314266+0000 mgr.vm01.nwhpas (mgr.14227) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 347 B/s wr, 24 op/s 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: audit 2026-04-16T19:29:29.564646+0000 mon.vm01 (mon.0) 1084 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: audit 2026-04-16T19:29:29.564646+0000 mon.vm01 (mon.0) 1084 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' 
entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: audit 2026-04-16T19:29:29.568592+0000 mon.vm01 (mon.0) 1085 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: audit 2026-04-16T19:29:29.568592+0000 mon.vm01 (mon.0) 1085 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: audit 2026-04-16T19:29:29.569525+0000 mon.vm01 (mon.0) 1086 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:29:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:29 vm01 bash[28222]: audit 2026-04-16T19:29:29.569525+0000 mon.vm01 (mon.0) 1086 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:29:30.519 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:29:30.686 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:29:30.686 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 12s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:29:30.686 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (17s) 12s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:29:30.686 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (6m) 2m ago 6m 110M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 f2349277c8fe 2026-04-16T19:29:30.686 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:29:30.897 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: cluster 2026-04-16T19:29:30.314636+0000 mgr.vm01.nwhpas (mgr.14227) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: cluster 2026-04-16T19:29:30.314636+0000 mgr.vm01.nwhpas (mgr.14227) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: audit 2026-04-16T19:29:30.499320+0000 mgr.vm01.nwhpas (mgr.14227) 497 : audit [DBG] from='client.15544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: audit 2026-04-16T19:29:30.499320+0000 mgr.vm01.nwhpas (mgr.14227) 497 : audit [DBG] from='client.15544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: audit 2026-04-16T19:29:30.679154+0000 mgr.vm01.nwhpas (mgr.14227) 498 : audit 
[DBG] from='client.15548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: audit 2026-04-16T19:29:30.679154+0000 mgr.vm01.nwhpas (mgr.14227) 498 : audit [DBG] from='client.15548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: audit 2026-04-16T19:29:30.893076+0000 mon.vm01 (mon.0) 1087 : audit [DBG] from='client.? 192.168.123.101:0/1536813178' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:31.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:31 vm04 bash[34817]: audit 2026-04-16T19:29:30.893076+0000 mon.vm01 (mon.0) 1087 : audit [DBG] from='client.? 192.168.123.101:0/1536813178' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: cluster 2026-04-16T19:29:30.314636+0000 mgr.vm01.nwhpas (mgr.14227) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: cluster 2026-04-16T19:29:30.314636+0000 mgr.vm01.nwhpas (mgr.14227) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: audit 2026-04-16T19:29:30.499320+0000 mgr.vm01.nwhpas (mgr.14227) 497 : audit [DBG] from='client.15544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: audit 2026-04-16T19:29:30.499320+0000 mgr.vm01.nwhpas (mgr.14227) 497 : audit [DBG] from='client.15544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: audit 2026-04-16T19:29:30.679154+0000 mgr.vm01.nwhpas (mgr.14227) 498 : audit [DBG] from='client.15548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: audit 2026-04-16T19:29:30.679154+0000 mgr.vm01.nwhpas (mgr.14227) 498 : audit [DBG] from='client.15548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: audit 2026-04-16T19:29:30.893076+0000 mon.vm01 (mon.0) 1087 : audit [DBG] from='client.? 192.168.123.101:0/1536813178' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:31.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:31 vm01 bash[28222]: audit 2026-04-16T19:29:30.893076+0000 mon.vm01 (mon.0) 1087 : audit [DBG] from='client.? 
192.168.123.101:0/1536813178' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:33.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:33 vm04 bash[34817]: cluster 2026-04-16T19:29:32.314996+0000 mgr.vm01.nwhpas (mgr.14227) 499 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:33.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:33 vm04 bash[34817]: cluster 2026-04-16T19:29:32.314996+0000 mgr.vm01.nwhpas (mgr.14227) 499 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:33.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:33 vm01 bash[28222]: cluster 2026-04-16T19:29:32.314996+0000 mgr.vm01.nwhpas (mgr.14227) 499 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:33.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:33 vm01 bash[28222]: cluster 2026-04-16T19:29:32.314996+0000 mgr.vm01.nwhpas (mgr.14227) 499 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: cluster 2026-04-16T19:29:34.315379+0000 mgr.vm01.nwhpas (mgr.14227) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: cluster 2026-04-16T19:29:34.315379+0000 mgr.vm01.nwhpas (mgr.14227) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.337589+0000 mon.vm01 (mon.0) 1088 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.337589+0000 mon.vm01 (mon.0) 1088 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.343226+0000 mon.vm01 (mon.0) 1089 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.343226+0000 mon.vm01 (mon.0) 1089 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.344084+0000 mon.vm01 (mon.0) 1090 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.344084+0000 mon.vm01 (mon.0) 1090 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : 
dispatch 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.344605+0000 mon.vm01 (mon.0) 1091 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.344605+0000 mon.vm01 (mon.0) 1091 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: cluster 2026-04-16T19:29:34.345581+0000 mgr.vm01.nwhpas (mgr.14227) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: cluster 2026-04-16T19:29:34.345581+0000 mgr.vm01.nwhpas (mgr.14227) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.348327+0000 mon.vm01 (mon.0) 1092 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.348327+0000 mon.vm01 (mon.0) 1092 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.349573+0000 mon.vm01 (mon.0) 1093 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: audit 2026-04-16T19:29:34.349573+0000 mon.vm01 (mon.0) 1093 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: cluster 2026-04-16T19:29:34.613078+0000 mon.vm01 (mon.0) 1094 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-16T19:29:35.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:35 vm04 bash[34817]: cluster 2026-04-16T19:29:34.613078+0000 mon.vm01 (mon.0) 1094 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: cluster 2026-04-16T19:29:34.315379+0000 mgr.vm01.nwhpas (mgr.14227) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: cluster 2026-04-16T19:29:34.315379+0000 mgr.vm01.nwhpas (mgr.14227) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:29:35.712 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.337589+0000 mon.vm01 (mon.0) 1088 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.337589+0000 mon.vm01 (mon.0) 1088 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.343226+0000 mon.vm01 (mon.0) 1089 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.343226+0000 mon.vm01 (mon.0) 1089 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.344084+0000 mon.vm01 (mon.0) 1090 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.344084+0000 mon.vm01 (mon.0) 1090 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.344605+0000 mon.vm01 (mon.0) 1091 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.344605+0000 mon.vm01 (mon.0) 1091 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: cluster 2026-04-16T19:29:34.345581+0000 mgr.vm01.nwhpas (mgr.14227) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: cluster 2026-04-16T19:29:34.345581+0000 mgr.vm01.nwhpas (mgr.14227) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:35.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.348327+0000 mon.vm01 (mon.0) 1092 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.348327+0000 mon.vm01 (mon.0) 1092 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:35.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.349573+0000 mon.vm01 (mon.0) 1093 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": 
"osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:29:35.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: audit 2026-04-16T19:29:34.349573+0000 mon.vm01 (mon.0) 1093 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:29:35.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: cluster 2026-04-16T19:29:34.613078+0000 mon.vm01 (mon.0) 1094 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-16T19:29:35.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:35 vm01 bash[28222]: cluster 2026-04-16T19:29:34.613078+0000 mon.vm01 (mon.0) 1094 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-04-16T19:29:36.094 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:29:36.266 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:29:36.266 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 17s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:29:36.266 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (23s) 17s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:29:36.266 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 1s ago 6m - - 2026-04-16T19:29:36.266 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 1s ago 6m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:29:36.485 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:29:36.486 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:29:36.486 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: audit 2026-04-16T19:29:36.074869+0000 mgr.vm01.nwhpas (mgr.14227) 502 : audit [DBG] from='client.15556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: audit 2026-04-16T19:29:36.074869+0000 mgr.vm01.nwhpas (mgr.14227) 502 : audit [DBG] from='client.15556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: audit 2026-04-16T19:29:36.259124+0000 mgr.vm01.nwhpas (mgr.14227) 503 : audit [DBG] from='client.15560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: audit 2026-04-16T19:29:36.259124+0000 mgr.vm01.nwhpas (mgr.14227) 503 : audit [DBG] from='client.15560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: cluster 2026-04-16T19:29:36.345984+0000 mgr.vm01.nwhpas (mgr.14227) 504 : cluster [DBG] 
pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: cluster 2026-04-16T19:29:36.345984+0000 mgr.vm01.nwhpas (mgr.14227) 504 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: audit 2026-04-16T19:29:36.481167+0000 mon.vm01 (mon.0) 1095 : audit [DBG] from='client.? 192.168.123.101:0/1274053914' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:37.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:37 vm04 bash[34817]: audit 2026-04-16T19:29:36.481167+0000 mon.vm01 (mon.0) 1095 : audit [DBG] from='client.? 192.168.123.101:0/1274053914' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: audit 2026-04-16T19:29:36.074869+0000 mgr.vm01.nwhpas (mgr.14227) 502 : audit [DBG] from='client.15556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: audit 2026-04-16T19:29:36.074869+0000 mgr.vm01.nwhpas (mgr.14227) 502 : audit [DBG] from='client.15556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: audit 2026-04-16T19:29:36.259124+0000 mgr.vm01.nwhpas (mgr.14227) 503 : audit [DBG] from='client.15560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: audit 2026-04-16T19:29:36.259124+0000 mgr.vm01.nwhpas (mgr.14227) 503 : audit [DBG] from='client.15560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: cluster 2026-04-16T19:29:36.345984+0000 mgr.vm01.nwhpas (mgr.14227) 504 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: cluster 2026-04-16T19:29:36.345984+0000 mgr.vm01.nwhpas (mgr.14227) 504 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: audit 2026-04-16T19:29:36.481167+0000 mon.vm01 (mon.0) 1095 : audit [DBG] from='client.? 192.168.123.101:0/1274053914' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:37.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:37 vm01 bash[28222]: audit 2026-04-16T19:29:36.481167+0000 mon.vm01 (mon.0) 1095 : audit [DBG] from='client.? 
192.168.123.101:0/1274053914' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:29:38.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:38 vm04 bash[34817]: audit 2026-04-16T19:29:37.575477+0000 mon.vm01 (mon.0) 1096 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:38.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:38 vm04 bash[34817]: audit 2026-04-16T19:29:37.575477+0000 mon.vm01 (mon.0) 1096 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:38.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:38 vm04 bash[34817]: audit 2026-04-16T19:29:37.576062+0000 mon.vm01 (mon.0) 1097 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:38.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:38 vm04 bash[34817]: audit 2026-04-16T19:29:37.576062+0000 mon.vm01 (mon.0) 1097 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:38.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:38 vm01 bash[28222]: audit 2026-04-16T19:29:37.575477+0000 mon.vm01 (mon.0) 1096 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:38.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:38 vm01 bash[28222]: audit 2026-04-16T19:29:37.575477+0000 mon.vm01 (mon.0) 1096 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:29:38.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:38 vm01 bash[28222]: audit 2026-04-16T19:29:37.576062+0000 mon.vm01 (mon.0) 1097 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:38.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:38 vm01 bash[28222]: audit 2026-04-16T19:29:37.576062+0000 mon.vm01 (mon.0) 1097 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:29:39.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:39 vm04 bash[34817]: cluster 2026-04-16T19:29:38.346374+0000 mgr.vm01.nwhpas (mgr.14227) 505 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:29:39.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:39 vm04 bash[34817]: cluster 2026-04-16T19:29:38.346374+0000 mgr.vm01.nwhpas (mgr.14227) 505 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:29:39.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:39 vm01 bash[28222]: cluster 2026-04-16T19:29:38.346374+0000 mgr.vm01.nwhpas (mgr.14227) 505 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:29:39.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:39 vm01 bash[28222]: cluster 2026-04-16T19:29:38.346374+0000 mgr.vm01.nwhpas (mgr.14227) 505 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:29:41.680 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 
2026-04-16T19:29:41.856 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:41.856 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 23s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:41.856 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (28s) 23s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:29:41.856 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 7s ago 6m - -
2026-04-16T19:29:41.856 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 7s ago 6m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:41.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:41 vm04 bash[34817]: cluster 2026-04-16T19:29:40.346836+0000 mgr.vm01.nwhpas (mgr.14227) 506 : cluster [DBG] pgmap v270: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:42.072 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:29:42.072 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:29:42.072 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:29:42.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:42 vm04 bash[34817]: audit 2026-04-16T19:29:41.659999+0000 mgr.vm01.nwhpas (mgr.14227) 507 : audit [DBG] from='client.15568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:42.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:42 vm04 bash[34817]: audit 2026-04-16T19:29:41.849535+0000 mgr.vm01.nwhpas (mgr.14227) 508 : audit [DBG] from='client.15572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:42.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:42 vm04 bash[34817]: audit 2026-04-16T19:29:42.067347+0000 mon.vm01 (mon.0) 1098 : audit [DBG] from='client.? 192.168.123.101:0/2629731613' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:29:43.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:43 vm04 bash[34817]: cluster 2026-04-16T19:29:42.347255+0000 mgr.vm01.nwhpas (mgr.14227) 509 : cluster [DBG] pgmap v271: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:29:45.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:45 vm04 bash[34817]: cluster 2026-04-16T19:29:44.347664+0000 mgr.vm01.nwhpas (mgr.14227) 510 : cluster [DBG] pgmap v272: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s
2026-04-16T19:29:47.272 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:29:47.440 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:47.440 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 29s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:47.440 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (34s) 29s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:29:47.440 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 13s ago 6m - -
2026-04-16T19:29:47.440 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 13s ago 6m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:47.652 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:29:47.652 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:29:47.652 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:29:47.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:47 vm04 bash[34817]: cluster 2026-04-16T19:29:46.348098+0000 mgr.vm01.nwhpas (mgr.14227) 511 : cluster [DBG] pgmap v273: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:48.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:48 vm04 bash[34817]: audit 2026-04-16T19:29:47.253029+0000 mgr.vm01.nwhpas (mgr.14227) 512 : audit [DBG] from='client.15580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:48.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:48 vm04 bash[34817]: audit 2026-04-16T19:29:47.433071+0000 mgr.vm01.nwhpas (mgr.14227) 513 : audit [DBG] from='client.15584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:48.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:48 vm04 bash[34817]: audit 2026-04-16T19:29:47.647313+0000 mon.vm01 (mon.0) 1099 : audit [DBG] from='client.? 192.168.123.101:0/3276151627' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
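The pattern in this excerpt (a "Waiting for rgw.foo.vm04.rpimxa to stop" message, then an rgw daemon table, then a health detail block, roughly every five seconds) is what a shell wait loop against the orchestrator looks like from the outside. A minimal sketch of such a loop, reconstructed from the output alone; the variable name, the 5 s sleep, and the 300 s cap are assumptions, not something this log states:

    # Hypothetical reconstruction of the poll driving the lines above: keep
    # checking `ceph orch ps` until the daemon's row reports "stopped", and
    # on each miss print progress plus the rgw table and the health detail.
    rgw=rgw.foo.vm04.rpimxa
    timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do
        echo 'Waiting for $rgw to stop'
        ceph orch ps --daemon-type rgw
        ceph health detail
        sleep 5
    done"

Note that the daemon never reaches "stopped" here: every subsequent poll still shows it in "error", so the same three blocks repeat for the rest of this excerpt.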
2026-04-16T19:29:49.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:49 vm04 bash[34817]: cluster 2026-04-16T19:29:48.348473+0000 mgr.vm01.nwhpas (mgr.14227) 514 : cluster [DBG] pgmap v274: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:29:51.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:51 vm04 bash[34817]: cluster 2026-04-16T19:29:50.348851+0000 mgr.vm01.nwhpas (mgr.14227) 515 : cluster [DBG] pgmap v275: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:29:52.843 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:29:52.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:52 vm04 bash[34817]: audit 2026-04-16T19:29:52.572431+0000 mon.vm01 (mon.0) 1100 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:29:53.031 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:53.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (5m) 34s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:53.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (39s) 34s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:29:53.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 18s ago 6m - -
2026-04-16T19:29:53.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 18s ago 6m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:53.254 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:29:53.254 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:29:53.254 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:29:53.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:53 vm04 bash[34817]: cluster 2026-04-16T19:29:52.349214+0000 mgr.vm01.nwhpas (mgr.14227) 516 : cluster [DBG] pgmap v276: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:29:53.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:53 vm04 bash[34817]: audit 2026-04-16T19:29:52.823119+0000 mgr.vm01.nwhpas (mgr.14227) 517 : audit [DBG] from='client.15592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:53.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:53 vm04 bash[34817]: audit 2026-04-16T19:29:53.249910+0000 mon.vm01 (mon.0) 1101 : audit [DBG] from='client.? 192.168.123.101:0/3742240764' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
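For scripting against tables like the ones above, the STATUS column can be pulled out of ceph orch ps directly. A small sketch; the field index is read off the NAME HOST PORTS STATUS header shown in this log and only holds for daemons that list a PORTS value:

    # Print just the state of one daemon, e.g. "error" for the rgw on vm04.
    ceph orch ps --daemon-type rgw | awk '$1 == "rgw.foo.vm04.rpimxa" { print $4 }'

For anything less throwaway, ceph orch ps --format json sidesteps the column arithmetic entirely.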
2026-04-16T19:29:54.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:54 vm04 bash[34817]: audit 2026-04-16T19:29:53.024235+0000 mgr.vm01.nwhpas (mgr.14227) 518 : audit [DBG] from='client.15596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:55.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:55 vm04 bash[34817]: cluster 2026-04-16T19:29:54.349684+0000 mgr.vm01.nwhpas (mgr.14227) 519 : cluster [DBG] pgmap v277: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:29:57.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:29:57 vm04 bash[34817]: cluster 2026-04-16T19:29:56.350083+0000 mgr.vm01.nwhpas (mgr.14227) 520 : cluster [DBG] pgmap v278: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:29:58.452 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:29:58.635 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:29:58.635 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 40s ago 6m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:29:58.635 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (45s) 40s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:29:58.635 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 24s ago 6m - -
2026-04-16T19:29:58.635 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (6m) 24s ago 6m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:29:58.862 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:29:58.862 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:29:58.862 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:29:59.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:59 vm01 bash[28222]: cluster 2026-04-16T19:29:58.350415+0000 mgr.vm01.nwhpas (mgr.14227) 521 : cluster [DBG] pgmap v279: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:29:59.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:59 vm01 bash[28222]: audit 2026-04-16T19:29:58.430888+0000 mgr.vm01.nwhpas (mgr.14227) 522 : audit [DBG] from='client.15604 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:59.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:59 vm01 bash[28222]: audit 2026-04-16T19:29:58.627852+0000 mgr.vm01.nwhpas (mgr.14227) 523 : audit [DBG] from='client.15608 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:29:59.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:29:59 vm01 bash[28222]: audit 2026-04-16T19:29:58.857709+0000 mon.vm01 (mon.0) 1102 : audit [DBG] from='client.? 192.168.123.101:0/3518527106' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:01.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:01 vm04 bash[34817]: cluster 2026-04-16T19:30:00.000105+0000 mon.vm01 (mon.0) 1103 : cluster [WRN] Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:01.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:01 vm04 bash[34817]: cluster 2026-04-16T19:30:00.000124+0000 mon.vm01 (mon.0) 1104 : cluster [WRN] [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:01.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:01 vm04 bash[34817]: cluster 2026-04-16T19:30:00.000130+0000 mon.vm01 (mon.0) 1105 : cluster [WRN] daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:02.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:02 vm04 bash[34817]: cluster 2026-04-16T19:30:00.350856+0000 mgr.vm01.nwhpas (mgr.14227) 524 : cluster [DBG] pgmap v280: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:03.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:03 vm04 bash[34817]: cluster 2026-04-16T19:30:02.351232+0000 mgr.vm01.nwhpas (mgr.14227) 525 : cluster [DBG] pgmap v281: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:04.055 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:04.225 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:04.225 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 45s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:04.225 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (51s) 45s ago 6m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:04.225 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 29s ago 6m - -
2026-04-16T19:30:04.225 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 29s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:04.438 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:04.438 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:04.438 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:05.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:05 vm04 bash[34817]: audit 2026-04-16T19:30:04.035613+0000 mgr.vm01.nwhpas (mgr.14227) 526 : audit [DBG] from='client.15616 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:05.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:05 vm04 bash[34817]: audit 2026-04-16T19:30:04.217976+0000 mgr.vm01.nwhpas (mgr.14227) 527 : audit [DBG] from='client.15620 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:05.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:05 vm04 bash[34817]: cluster 2026-04-16T19:30:04.351684+0000 mgr.vm01.nwhpas (mgr.14227) 528 : cluster [DBG] pgmap v282: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:05.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:05 vm04 bash[34817]: audit 2026-04-16T19:30:04.433813+0000 mon.vm01 (mon.0) 1106 : audit [DBG] from='client.? 192.168.123.101:0/3155371025' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
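The HEALTH_WARN block repeating through this excerpt is also the easiest thing for a script to gate on. A sketch keyed to the exact health code printed above:

    # A match means the cluster is reporting at least one failed cephadm
    # daemon, as in the "[WRN] CEPHADM_FAILED_DAEMON" lines throughout this
    # log; the follow-up lists the daemon rows not in running state (the
    # table header is included in that output).
    if ceph health detail | grep -q CEPHADM_FAILED_DAEMON; then
        echo "cephadm reports a failed daemon" >&2
        ceph orch ps | grep -v ' running '
    fi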
2026-04-16T19:30:07.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:07 vm04 bash[34817]: cluster 2026-04-16T19:30:06.352045+0000 mgr.vm01.nwhpas (mgr.14227) 529 : cluster [DBG] pgmap v283: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:08.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:08 vm04 bash[34817]: audit 2026-04-16T19:30:07.572604+0000 mon.vm01 (mon.0) 1107 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:09.635 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:09.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:09 vm04 bash[34817]: cluster 2026-04-16T19:30:08.352415+0000 mgr.vm01.nwhpas (mgr.14227) 530 : cluster [DBG] pgmap v284: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:09.807 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:09.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 51s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:09.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (56s) 51s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:09.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 35s ago 7m - -
2026-04-16T19:30:09.807 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 35s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:10.031 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:10.031 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:10.031 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:10.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:10 vm04 bash[34817]: audit 2026-04-16T19:30:09.614421+0000 mgr.vm01.nwhpas (mgr.14227) 531 : audit [DBG] from='client.15628 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:10.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:10 vm04 bash[34817]: audit 2026-04-16T19:30:09.800246+0000 mgr.vm01.nwhpas (mgr.14227) 532 : audit [DBG] from='client.15632 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:10.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:10 vm04 bash[34817]: audit 2026-04-16T19:30:10.026409+0000 mon.vm01 (mon.0) 1108 : audit [DBG] from='client.? 192.168.123.101:0/1249181707' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
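Every mon cluster-log and audit record in this excerpt shows up under both journalctl@ceph.mon.vm01 and journalctl@ceph.mon.vm04 because each monitor's systemd journal is tailed on its own host and both monitors stream the same central cluster log. To follow the same stream by hand on a cephadm host, something like the following works; the fsid lookup is an assumption here, since the cluster fsid never appears in this excerpt:

    # Tail the local mon's unit; cephadm names daemon units
    # ceph-<fsid>@<daemon>.service.
    journalctl -f -u "ceph-$(ceph fsid)@mon.$(hostname -s).service"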
2026-04-16T19:30:11.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:11 vm04 bash[34817]: cluster 2026-04-16T19:30:10.352779+0000 mgr.vm01.nwhpas (mgr.14227) 533 : cluster [DBG] pgmap v285: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:13.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:13 vm04 bash[34817]: cluster 2026-04-16T19:30:12.353125+0000 mgr.vm01.nwhpas (mgr.14227) 534 : cluster [DBG] pgmap v286: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:15.239 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:15.411 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:15.411 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 57s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:15.411 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (62s) 57s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:15.411 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 41s ago 7m - -
2026-04-16T19:30:15.411 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 41s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:15.642 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:15.642 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:15.642 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:15.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:15 vm04 bash[34817]: cluster 2026-04-16T19:30:14.353601+0000 mgr.vm01.nwhpas (mgr.14227) 535 : cluster [DBG] pgmap v287: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:16.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:16 vm04 bash[34817]: audit 2026-04-16T19:30:15.218347+0000 mgr.vm01.nwhpas (mgr.14227) 536 : audit [DBG] from='client.25027 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:16.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:16 vm04 bash[34817]: audit 2026-04-16T19:30:15.403915+0000 mgr.vm01.nwhpas (mgr.14227) 537 : audit [DBG] from='client.15644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:16.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:16 vm04 bash[34817]: audit 2026-04-16T19:30:15.637263+0000 mon.vm01 (mon.0) 1109 : audit [DBG] from='client.? 192.168.123.101:0/3551652215' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
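Since rgw.foo.vm04.rpimxa stays in error for the entire minute covered here, the usual operator follow-up outside a test run would be to ask the orchestrator to recreate the daemon. These are standard ceph orch verbs, shown as a suggestion; nothing in this log indicates the run executed them:

    # Inspect the failure, then have cephadm recreate the daemon.
    ceph orch ps --daemon-type rgw --refresh
    ceph log last cephadm                           # recent cephadm module messages
    ceph orch daemon redeploy rgw.foo.vm04.rpimxa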
2026-04-16T19:30:17.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:17 vm04 bash[34817]: cluster 2026-04-16T19:30:16.353988+0000 mgr.vm01.nwhpas (mgr.14227) 538 : cluster [DBG] pgmap v288: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:17.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:17 vm01 bash[28222]: cluster 2026-04-16T19:30:16.353988+0000 mgr.vm01.nwhpas (mgr.14227) 538 : cluster [DBG] pgmap v288: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:19.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:19 vm04 bash[34817]: cluster 2026-04-16T19:30:18.354403+0000 mgr.vm01.nwhpas (mgr.14227) 539 : cluster [DBG] pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:19.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:19 vm01 bash[28222]: cluster 2026-04-16T19:30:18.354403+0000 mgr.vm01.nwhpas (mgr.14227) 539 : cluster [DBG] pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:20.835 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:21.009 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:21.009 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 62s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:21.009 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (67s) 62s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:21.009 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 46s ago 7m - -
2026-04-16T19:30:21.009 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 46s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:21.223 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:21.224 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:21.224 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:21.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:21 vm04 bash[34817]: cluster 2026-04-16T19:30:20.354861+0000 mgr.vm01.nwhpas (mgr.14227) 540 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:21.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:21 vm04 bash[34817]: audit 2026-04-16T19:30:20.814593+0000 mgr.vm01.nwhpas (mgr.14227) 541 : audit [DBG] from='client.15652 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:21.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:21 vm04 bash[34817]: audit 2026-04-16T19:30:21.219023+0000 mon.vm01 (mon.0) 1110 : audit [DBG] from='client.? 192.168.123.101:0/473037822' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:21.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:21 vm01 bash[28222]: cluster 2026-04-16T19:30:20.354861+0000 mgr.vm01.nwhpas (mgr.14227) 540 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:21.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:21 vm01 bash[28222]: audit 2026-04-16T19:30:20.814593+0000 mgr.vm01.nwhpas (mgr.14227) 541 : audit [DBG] from='client.15652 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:21.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:21 vm01 bash[28222]: audit 2026-04-16T19:30:21.219023+0000 mon.vm01 (mon.0) 1110 : audit [DBG] from='client.? 192.168.123.101:0/473037822' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:22.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:22 vm04 bash[34817]: audit 2026-04-16T19:30:21.001798+0000 mgr.vm01.nwhpas (mgr.14227) 542 : audit [DBG] from='client.25041 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:22.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:22 vm04 bash[34817]: audit 2026-04-16T19:30:22.573007+0000 mon.vm01 (mon.0) 1111 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:22.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:22 vm01 bash[28222]: audit 2026-04-16T19:30:21.001798+0000 mgr.vm01.nwhpas (mgr.14227) 542 : audit [DBG] from='client.25041 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:22.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:22 vm01 bash[28222]: audit 2026-04-16T19:30:22.573007+0000 mon.vm01 (mon.0) 1111 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:23.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:23 vm04 bash[34817]: cluster 2026-04-16T19:30:22.355336+0000 mgr.vm01.nwhpas (mgr.14227) 543 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:23.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:23 vm01 bash[28222]: cluster 2026-04-16T19:30:22.355336+0000 mgr.vm01.nwhpas (mgr.14227) 543 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:25.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:25 vm04 bash[34817]: cluster 2026-04-16T19:30:24.355784+0000 mgr.vm01.nwhpas (mgr.14227) 544 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:25.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:25 vm01 bash[28222]: cluster 2026-04-16T19:30:24.355784+0000 mgr.vm01.nwhpas (mgr.14227) 544 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:26.422 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:26.594 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:26.594 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 68s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:26.594 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (73s) 68s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:26.594 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 52s ago 7m - -
2026-04-16T19:30:26.594 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 52s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:26.807 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:26.807 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:26.807 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:27.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:27 vm04 bash[34817]: cluster 2026-04-16T19:30:26.356185+0000 mgr.vm01.nwhpas (mgr.14227) 545 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:27.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:27 vm04 bash[34817]: audit 2026-04-16T19:30:26.402174+0000 mgr.vm01.nwhpas (mgr.14227) 546 : audit [DBG] from='client.15664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:27.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:27 vm04 bash[34817]: audit 2026-04-16T19:30:26.586312+0000 mgr.vm01.nwhpas (mgr.14227) 547 : audit [DBG] from='client.15668 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:27.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:27 vm04 bash[34817]: audit 2026-04-16T19:30:26.802995+0000 mon.vm01 (mon.0) 1112 : audit [DBG] from='client.? 192.168.123.101:0/1870350025' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:27.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:27 vm01 bash[28222]: cluster 2026-04-16T19:30:26.356185+0000 mgr.vm01.nwhpas (mgr.14227) 545 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:27.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:27 vm01 bash[28222]: audit 2026-04-16T19:30:26.402174+0000 mgr.vm01.nwhpas (mgr.14227) 546 : audit [DBG] from='client.15664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:27.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:27 vm01 bash[28222]: audit 2026-04-16T19:30:26.586312+0000 mgr.vm01.nwhpas (mgr.14227) 547 : audit [DBG] from='client.15668 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:27.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:27 vm01 bash[28222]: audit 2026-04-16T19:30:26.802995+0000 mon.vm01 (mon.0) 1112 : audit [DBG] from='client.? 192.168.123.101:0/1870350025' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:29.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:29 vm04 bash[34817]: cluster 2026-04-16T19:30:28.356552+0000 mgr.vm01.nwhpas (mgr.14227) 548 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:29.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:29 vm01 bash[28222]: cluster 2026-04-16T19:30:28.356552+0000 mgr.vm01.nwhpas (mgr.14227) 548 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:32.017 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:32.197 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:32.197 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 73s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:32.197 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (79s) 73s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:32.197 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 57s ago 7m - -
2026-04-16T19:30:32.197 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 57s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:32.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:31 vm04 bash[34817]: cluster 2026-04-16T19:30:30.357046+0000 mgr.vm01.nwhpas (mgr.14227) 549 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:31 vm01 bash[28222]: cluster 2026-04-16T19:30:30.357046+0000 mgr.vm01.nwhpas (mgr.14227) 549 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:32.417 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:32.417 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:32.417 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:33.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:32 vm04 bash[34817]: audit 2026-04-16T19:30:32.412600+0000 mon.vm01 (mon.0) 1113 : audit [DBG] from='client.? 192.168.123.101:0/2959883528' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:33.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:32 vm01 bash[28222]: audit 2026-04-16T19:30:32.412600+0000 mon.vm01 (mon.0) 1113 : audit [DBG] from='client.? 192.168.123.101:0/2959883528' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:34.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:33 vm04 bash[34817]: audit 2026-04-16T19:30:31.996372+0000 mgr.vm01.nwhpas (mgr.14227) 550 : audit [DBG] from='client.15676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:34.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:33 vm04 bash[34817]: audit 2026-04-16T19:30:32.189704+0000 mgr.vm01.nwhpas (mgr.14227) 551 : audit [DBG] from='client.15680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:34.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:33 vm04 bash[34817]: cluster 2026-04-16T19:30:32.357468+0000 mgr.vm01.nwhpas (mgr.14227) 552 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:34.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:33 vm01 bash[28222]: audit 2026-04-16T19:30:31.996372+0000 mgr.vm01.nwhpas (mgr.14227) 550 : audit [DBG] from='client.15676 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:34.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:33 vm01 bash[28222]: audit 2026-04-16T19:30:32.189704+0000 mgr.vm01.nwhpas (mgr.14227) 551 : audit [DBG] from='client.15680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:34.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:33 vm01 bash[28222]: cluster 2026-04-16T19:30:32.357468+0000 mgr.vm01.nwhpas (mgr.14227) 552 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:34 vm04 bash[34817]: audit 2026-04-16T19:30:34.364031+0000 mon.vm01 (mon.0) 1114 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:30:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:34 vm04 bash[34817]: audit 2026-04-16T19:30:34.700004+0000 mon.vm01 (mon.0) 1115 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:30:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:34 vm04 bash[34817]: audit 2026-04-16T19:30:34.700559+0000 mon.vm01 (mon.0) 1116 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:30:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:34 vm04 bash[34817]: audit 2026-04-16T19:30:34.705556+0000 mon.vm01 (mon.0) 1117 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:30:35.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:34 vm04 bash[34817]: audit 2026-04-16T19:30:34.707212+0000 mon.vm01 (mon.0) 1118 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:30:35.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:34 vm01 bash[28222]: audit 2026-04-16T19:30:34.364031+0000 mon.vm01 (mon.0) 1114 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:30:35.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:34 vm01 bash[28222]: audit 2026-04-16T19:30:34.700004+0000 mon.vm01 (mon.0) 1115 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:30:35.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:34 vm01 bash[28222]: audit 2026-04-16T19:30:34.700559+0000 mon.vm01 (mon.0) 1116 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:30:35.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:34 vm01 bash[28222]: audit 2026-04-16T19:30:34.705556+0000 mon.vm01 (mon.0) 1117 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:30:35.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:34 vm01 bash[28222]: audit 2026-04-16T19:30:34.707212+0000 mon.vm01 (mon.0) 1118 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:30:36.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:35 vm04 bash[34817]: cluster 2026-04-16T19:30:34.357888+0000 mgr.vm01.nwhpas (mgr.14227) 553 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:36.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:35 vm04 bash[34817]: cluster 2026-04-16T19:30:34.701424+0000 mgr.vm01.nwhpas (mgr.14227) 554 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-16T19:30:36.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:35 vm04 bash[34817]: cluster 2026-04-16T19:30:34.701563+0000 mgr.vm01.nwhpas (mgr.14227) 555 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s
2026-04-16T19:30:36.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:35 vm01 bash[28222]: cluster 2026-04-16T19:30:34.357888+0000 mgr.vm01.nwhpas (mgr.14227) 553 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:36.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:35 vm01 bash[28222]: cluster 2026-04-16T19:30:34.701424+0000 mgr.vm01.nwhpas (mgr.14227) 554 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-16T19:30:36.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:35 vm01 bash[28222]: cluster 2026-04-16T19:30:34.701563+0000 mgr.vm01.nwhpas (mgr.14227) 555 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s
2026-04-16T19:30:37.621 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:37.796 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:37.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 79s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:37.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (84s) 79s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:37.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 63s ago 7m - -
2026-04-16T19:30:37.796 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 63s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:38.016 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:38.016 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:38.016 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:38.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:37 vm04 bash[34817]: cluster 2026-04-16T19:30:36.702002+0000 mgr.vm01.nwhpas (mgr.14227) 556 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:38.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:37 vm04 bash[34817]: audit 2026-04-16T19:30:37.575954+0000 mon.vm01 (mon.0) 1119 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:37 vm01 bash[28222]: cluster 2026-04-16T19:30:36.702002+0000 mgr.vm01.nwhpas (mgr.14227) 556 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:37 vm01 bash[28222]: audit 2026-04-16T19:30:37.575954+0000 mon.vm01 (mon.0) 1119 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:39.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:38 vm04 bash[34817]: audit 2026-04-16T19:30:37.600165+0000 mgr.vm01.nwhpas (mgr.14227) 557 : audit [DBG] from='client.15688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:39.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:38 vm04 bash[34817]: audit 2026-04-16T19:30:37.788693+0000 mgr.vm01.nwhpas (mgr.14227) 558 : audit [DBG] from='client.15692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:39.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:38 vm04 bash[34817]: audit 2026-04-16T19:30:38.011767+0000 mon.vm01 (mon.0) 1120 : audit [DBG] from='client.? 192.168.123.101:0/3763371524' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:38 vm01 bash[28222]: audit 2026-04-16T19:30:37.600165+0000 mgr.vm01.nwhpas (mgr.14227) 557 : audit [DBG] from='client.15688 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:38 vm01 bash[28222]: audit 2026-04-16T19:30:37.788693+0000 mgr.vm01.nwhpas (mgr.14227) 558 : audit [DBG] from='client.15692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:38 vm01 bash[28222]: audit 2026-04-16T19:30:38.011767+0000 mon.vm01 (mon.0) 1120 : audit [DBG] from='client.? 192.168.123.101:0/3763371524' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:40.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:39 vm04 bash[34817]: cluster 2026-04-16T19:30:38.702436+0000 mgr.vm01.nwhpas (mgr.14227) 559 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:39 vm01 bash[28222]: cluster 2026-04-16T19:30:38.702436+0000 mgr.vm01.nwhpas (mgr.14227) 559 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:42.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:41 vm04 bash[34817]: cluster 2026-04-16T19:30:40.702933+0000 mgr.vm01.nwhpas (mgr.14227) 560 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:42.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:41 vm01 bash[28222]: cluster 2026-04-16T19:30:40.702933+0000 mgr.vm01.nwhpas (mgr.14227) 560 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:43.228 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:43.425 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:43.425 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 85s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:43.425 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (90s) 85s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:43.425 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 69s ago 7m - -
2026-04-16T19:30:43.425 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 69s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:43.675 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:43.675 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:43.675 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:44.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:43 vm04 bash[34817]: cluster 2026-04-16T19:30:42.703337+0000 mgr.vm01.nwhpas (mgr.14227) 561 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:44.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:43 vm04 bash[34817]: audit 2026-04-16T19:30:43.670389+0000 mon.vm01 (mon.0) 1121 : audit [DBG] from='client.? 192.168.123.101:0/2118055306' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:44.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:43 vm01 bash[28222]: cluster 2026-04-16T19:30:42.703337+0000 mgr.vm01.nwhpas (mgr.14227) 561 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:30:44.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:43 vm01 bash[28222]: audit 2026-04-16T19:30:43.670389+0000 mon.vm01 (mon.0) 1121 : audit [DBG] from='client.? 192.168.123.101:0/2118055306' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:45.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:44 vm04 bash[34817]: audit 2026-04-16T19:30:43.203230+0000 mgr.vm01.nwhpas (mgr.14227) 562 : audit [DBG] from='client.15700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:45.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:44 vm04 bash[34817]: audit 2026-04-16T19:30:43.417721+0000 mgr.vm01.nwhpas (mgr.14227) 563 : audit [DBG] from='client.15704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:45.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:44 vm01 bash[28222]: audit 2026-04-16T19:30:43.203230+0000 mgr.vm01.nwhpas (mgr.14227) 562 : audit [DBG] from='client.15700 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:45.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:44 vm01 bash[28222]: audit 2026-04-16T19:30:43.417721+0000 mgr.vm01.nwhpas (mgr.14227) 563 : audit [DBG] from='client.15704 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:46.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:45 vm04 bash[34817]: cluster 2026-04-16T19:30:44.704034+0000 mgr.vm01.nwhpas (mgr.14227) 564 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s
2026-04-16T19:30:46.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:45 vm01 bash[28222]: cluster 2026-04-16T19:30:44.704034+0000 mgr.vm01.nwhpas (mgr.14227) 564 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s
2026-04-16T19:30:48.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:47 vm04 bash[34817]: cluster 2026-04-16T19:30:46.704506+0000 mgr.vm01.nwhpas (mgr.14227) 565 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:48.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:47 vm01 bash[28222]: cluster 2026-04-16T19:30:46.704506+0000 mgr.vm01.nwhpas (mgr.14227) 565 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:30:48.887 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:49.092 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:49.093 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 90s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:49.093 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (96s) 90s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:49.093 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 74s ago 7m - -
2026-04-16T19:30:49.093 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 74s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:49.327 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:49.327 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:49.327 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:50.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:49 vm04 bash[34817]: cluster 2026-04-16T19:30:48.704903+0000 mgr.vm01.nwhpas (mgr.14227) 566 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:50.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:49 vm04 bash[34817]: audit 2026-04-16T19:30:48.865851+0000 mgr.vm01.nwhpas (mgr.14227) 567 : audit [DBG] from='client.15712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:50.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:49 vm04 bash[34817]: audit 2026-04-16T19:30:49.322505+0000 mon.vm01 (mon.0) 1122 : audit [DBG] from='client.? 192.168.123.101:0/2852097815' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:50.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:49 vm01 bash[28222]: cluster 2026-04-16T19:30:48.704903+0000 mgr.vm01.nwhpas (mgr.14227) 566 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:50.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:49 vm01 bash[28222]: audit 2026-04-16T19:30:48.865851+0000 mgr.vm01.nwhpas (mgr.14227) 567 : audit [DBG] from='client.15712 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:50.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:49 vm01 bash[28222]: audit 2026-04-16T19:30:49.322505+0000 mon.vm01 (mon.0) 1122 : audit [DBG] from='client.? 192.168.123.101:0/2852097815' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:30:51.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:50 vm04 bash[34817]: audit 2026-04-16T19:30:49.085523+0000 mgr.vm01.nwhpas (mgr.14227) 568 : audit [DBG] from='client.15716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:51.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:50 vm01 bash[28222]: audit 2026-04-16T19:30:49.085523+0000 mgr.vm01.nwhpas (mgr.14227) 568 : audit [DBG] from='client.15716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:52.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:51 vm04 bash[34817]: cluster 2026-04-16T19:30:50.705311+0000 mgr.vm01.nwhpas (mgr.14227) 569 : cluster [DBG] pgmap v307: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:51 vm01 bash[28222]: cluster 2026-04-16T19:30:50.705311+0000 mgr.vm01.nwhpas (mgr.14227) 569 : cluster [DBG] pgmap v307: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:53.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:52 vm04 bash[34817]: audit 2026-04-16T19:30:52.576208+0000 mon.vm01 (mon.0) 1123 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:53.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:52 vm01 bash[28222]: audit 2026-04-16T19:30:52.576208+0000 mon.vm01 (mon.0) 1123 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:30:54.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:53 vm04 bash[34817]: cluster 2026-04-16T19:30:52.705629+0000 mgr.vm01.nwhpas (mgr.14227) 570 : cluster [DBG] pgmap v308: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:54.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:53 vm01 bash[28222]: cluster 2026-04-16T19:30:52.705629+0000 mgr.vm01.nwhpas (mgr.14227) 570 : cluster [DBG] pgmap v308: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:30:54.529 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:30:54.718 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:30:54.718 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (6m) 96s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:30:54.718 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (101s) 96s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:30:54.718 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 80s ago 7m - -
2026-04-16T19:30:54.718 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 80s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:30:54.954 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:30:54.954 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:30:54.954 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:30:55.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:54 vm04 bash[34817]: audit 2026-04-16T19:30:54.508050+0000 mgr.vm01.nwhpas (mgr.14227) 571 : audit [DBG] from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:30:55.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:54 vm04 bash[34817]: cluster 2026-04-16T19:30:54.706028+0000 mgr.vm01.nwhpas
(mgr.14227) 572 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:30:55.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:54 vm04 bash[34817]: cluster 2026-04-16T19:30:54.706028+0000 mgr.vm01.nwhpas (mgr.14227) 572 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:30:55.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:54 vm04 bash[34817]: audit 2026-04-16T19:30:54.711441+0000 mgr.vm01.nwhpas (mgr.14227) 573 : audit [DBG] from='client.15728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:30:55.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:54 vm04 bash[34817]: audit 2026-04-16T19:30:54.711441+0000 mgr.vm01.nwhpas (mgr.14227) 573 : audit [DBG] from='client.15728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:30:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:54 vm01 bash[28222]: audit 2026-04-16T19:30:54.508050+0000 mgr.vm01.nwhpas (mgr.14227) 571 : audit [DBG] from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:30:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:54 vm01 bash[28222]: audit 2026-04-16T19:30:54.508050+0000 mgr.vm01.nwhpas (mgr.14227) 571 : audit [DBG] from='client.15724 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:30:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:54 vm01 bash[28222]: cluster 2026-04-16T19:30:54.706028+0000 mgr.vm01.nwhpas (mgr.14227) 572 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:30:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:54 vm01 bash[28222]: cluster 2026-04-16T19:30:54.706028+0000 mgr.vm01.nwhpas (mgr.14227) 572 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:30:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:54 vm01 bash[28222]: audit 2026-04-16T19:30:54.711441+0000 mgr.vm01.nwhpas (mgr.14227) 573 : audit [DBG] from='client.15728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:30:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:54 vm01 bash[28222]: audit 2026-04-16T19:30:54.711441+0000 mgr.vm01.nwhpas (mgr.14227) 573 : audit [DBG] from='client.15728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:30:56.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:55 vm04 bash[34817]: audit 2026-04-16T19:30:54.949466+0000 mon.vm01 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.101:0/1367800018' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:30:56.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:55 vm04 bash[34817]: audit 2026-04-16T19:30:54.949466+0000 mon.vm01 (mon.0) 1124 : audit [DBG] from='client.? 
192.168.123.101:0/1367800018' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:30:56.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:55 vm01 bash[28222]: audit 2026-04-16T19:30:54.949466+0000 mon.vm01 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.101:0/1367800018' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:30:56.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:55 vm01 bash[28222]: audit 2026-04-16T19:30:54.949466+0000 mon.vm01 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.101:0/1367800018' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:30:57.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:56 vm04 bash[34817]: cluster 2026-04-16T19:30:56.706475+0000 mgr.vm01.nwhpas (mgr.14227) 574 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:30:57.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:56 vm04 bash[34817]: cluster 2026-04-16T19:30:56.706475+0000 mgr.vm01.nwhpas (mgr.14227) 574 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:30:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:56 vm01 bash[28222]: cluster 2026-04-16T19:30:56.706475+0000 mgr.vm01.nwhpas (mgr.14227) 574 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:30:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:56 vm01 bash[28222]: cluster 2026-04-16T19:30:56.706475+0000 mgr.vm01.nwhpas (mgr.14227) 574 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:00.176 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:31:00.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:59 vm04 bash[34817]: cluster 2026-04-16T19:30:58.706875+0000 mgr.vm01.nwhpas (mgr.14227) 575 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:00.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:30:59 vm04 bash[34817]: cluster 2026-04-16T19:30:58.706875+0000 mgr.vm01.nwhpas (mgr.14227) 575 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:00.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:59 vm01 bash[28222]: cluster 2026-04-16T19:30:58.706875+0000 mgr.vm01.nwhpas (mgr.14227) 575 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:00.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:30:59 vm01 bash[28222]: cluster 2026-04-16T19:30:58.706875+0000 mgr.vm01.nwhpas (mgr.14227) 575 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:00.366 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:31:00.366 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 
102s ago 7m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:31:00.366 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (107s) 102s ago 7m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:31:00.366 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 86s ago 7m - - 2026-04-16T19:31:00.366 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (7m) 86s ago 7m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:31:00.592 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:31:00.592 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:31:00.592 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:31:01.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:00 vm04 bash[34817]: audit 2026-04-16T19:31:00.587257+0000 mon.vm01 (mon.0) 1125 : audit [DBG] from='client.? 192.168.123.101:0/2863215913' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:01.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:00 vm04 bash[34817]: audit 2026-04-16T19:31:00.587257+0000 mon.vm01 (mon.0) 1125 : audit [DBG] from='client.? 192.168.123.101:0/2863215913' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:01.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:00 vm01 bash[28222]: audit 2026-04-16T19:31:00.587257+0000 mon.vm01 (mon.0) 1125 : audit [DBG] from='client.? 192.168.123.101:0/2863215913' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:01.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:00 vm01 bash[28222]: audit 2026-04-16T19:31:00.587257+0000 mon.vm01 (mon.0) 1125 : audit [DBG] from='client.? 
192.168.123.101:0/2863215913' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:01 vm04 bash[34817]: audit 2026-04-16T19:31:00.153549+0000 mgr.vm01.nwhpas (mgr.14227) 576 : audit [DBG] from='client.15736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:01 vm04 bash[34817]: audit 2026-04-16T19:31:00.153549+0000 mgr.vm01.nwhpas (mgr.14227) 576 : audit [DBG] from='client.15736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:01 vm04 bash[34817]: audit 2026-04-16T19:31:00.359194+0000 mgr.vm01.nwhpas (mgr.14227) 577 : audit [DBG] from='client.15740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:01 vm04 bash[34817]: audit 2026-04-16T19:31:00.359194+0000 mgr.vm01.nwhpas (mgr.14227) 577 : audit [DBG] from='client.15740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:01 vm04 bash[34817]: cluster 2026-04-16T19:31:00.707429+0000 mgr.vm01.nwhpas (mgr.14227) 578 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:02.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:01 vm04 bash[34817]: cluster 2026-04-16T19:31:00.707429+0000 mgr.vm01.nwhpas (mgr.14227) 578 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:01 vm01 bash[28222]: audit 2026-04-16T19:31:00.153549+0000 mgr.vm01.nwhpas (mgr.14227) 576 : audit [DBG] from='client.15736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:01 vm01 bash[28222]: audit 2026-04-16T19:31:00.153549+0000 mgr.vm01.nwhpas (mgr.14227) 576 : audit [DBG] from='client.15736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:01 vm01 bash[28222]: audit 2026-04-16T19:31:00.359194+0000 mgr.vm01.nwhpas (mgr.14227) 577 : audit [DBG] from='client.15740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:01 vm01 bash[28222]: audit 2026-04-16T19:31:00.359194+0000 mgr.vm01.nwhpas (mgr.14227) 577 : audit [DBG] from='client.15740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:01 vm01 bash[28222]: cluster 2026-04-16T19:31:00.707429+0000 mgr.vm01.nwhpas (mgr.14227) 578 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:01 
vm01 bash[28222]: cluster 2026-04-16T19:31:00.707429+0000 mgr.vm01.nwhpas (mgr.14227) 578 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:04.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:03 vm04 bash[34817]: cluster 2026-04-16T19:31:02.707817+0000 mgr.vm01.nwhpas (mgr.14227) 579 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:04.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:03 vm04 bash[34817]: cluster 2026-04-16T19:31:02.707817+0000 mgr.vm01.nwhpas (mgr.14227) 579 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:03 vm01 bash[28222]: cluster 2026-04-16T19:31:02.707817+0000 mgr.vm01.nwhpas (mgr.14227) 579 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:03 vm01 bash[28222]: cluster 2026-04-16T19:31:02.707817+0000 mgr.vm01.nwhpas (mgr.14227) 579 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:05.789 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:31:05.969 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:31:05.970 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 107s ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:31:05.970 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (112s) 107s ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:31:05.970 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 91s ago 8m - - 2026-04-16T19:31:05.970 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 91s ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:31:06.189 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:31:06.190 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:31:06.190 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:31:06.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:05 vm04 bash[34817]: cluster 2026-04-16T19:31:04.708191+0000 mgr.vm01.nwhpas (mgr.14227) 580 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:06.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:05 vm04 bash[34817]: cluster 2026-04-16T19:31:04.708191+0000 mgr.vm01.nwhpas (mgr.14227) 580 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:06.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:05 vm01 bash[28222]: cluster 2026-04-16T19:31:04.708191+0000 mgr.vm01.nwhpas (mgr.14227) 580 : cluster [DBG] pgmap v314: 129 
pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:06.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:05 vm01 bash[28222]: cluster 2026-04-16T19:31:04.708191+0000 mgr.vm01.nwhpas (mgr.14227) 580 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:06 vm01 bash[28222]: audit 2026-04-16T19:31:05.769034+0000 mgr.vm01.nwhpas (mgr.14227) 581 : audit [DBG] from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:06 vm01 bash[28222]: audit 2026-04-16T19:31:05.769034+0000 mgr.vm01.nwhpas (mgr.14227) 581 : audit [DBG] from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:06 vm01 bash[28222]: audit 2026-04-16T19:31:06.185206+0000 mon.vm01 (mon.0) 1126 : audit [DBG] from='client.? 192.168.123.101:0/813762927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:06 vm01 bash[28222]: audit 2026-04-16T19:31:06.185206+0000 mon.vm01 (mon.0) 1126 : audit [DBG] from='client.? 192.168.123.101:0/813762927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:07.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:06 vm04 bash[34817]: audit 2026-04-16T19:31:05.769034+0000 mgr.vm01.nwhpas (mgr.14227) 581 : audit [DBG] from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:07.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:06 vm04 bash[34817]: audit 2026-04-16T19:31:05.769034+0000 mgr.vm01.nwhpas (mgr.14227) 581 : audit [DBG] from='client.15748 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:07.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:06 vm04 bash[34817]: audit 2026-04-16T19:31:06.185206+0000 mon.vm01 (mon.0) 1126 : audit [DBG] from='client.? 192.168.123.101:0/813762927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:07.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:06 vm04 bash[34817]: audit 2026-04-16T19:31:06.185206+0000 mon.vm01 (mon.0) 1126 : audit [DBG] from='client.? 
192.168.123.101:0/813762927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:08.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:07 vm04 bash[34817]: audit 2026-04-16T19:31:05.962850+0000 mgr.vm01.nwhpas (mgr.14227) 582 : audit [DBG] from='client.15752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:08.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:07 vm04 bash[34817]: audit 2026-04-16T19:31:05.962850+0000 mgr.vm01.nwhpas (mgr.14227) 582 : audit [DBG] from='client.15752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:08.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:07 vm04 bash[34817]: cluster 2026-04-16T19:31:06.708587+0000 mgr.vm01.nwhpas (mgr.14227) 583 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:08.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:07 vm04 bash[34817]: cluster 2026-04-16T19:31:06.708587+0000 mgr.vm01.nwhpas (mgr.14227) 583 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:08.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:07 vm04 bash[34817]: audit 2026-04-16T19:31:07.576277+0000 mon.vm01 (mon.0) 1127 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:08.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:07 vm04 bash[34817]: audit 2026-04-16T19:31:07.576277+0000 mon.vm01 (mon.0) 1127 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:07 vm01 bash[28222]: audit 2026-04-16T19:31:05.962850+0000 mgr.vm01.nwhpas (mgr.14227) 582 : audit [DBG] from='client.15752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:07 vm01 bash[28222]: audit 2026-04-16T19:31:05.962850+0000 mgr.vm01.nwhpas (mgr.14227) 582 : audit [DBG] from='client.15752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:07 vm01 bash[28222]: cluster 2026-04-16T19:31:06.708587+0000 mgr.vm01.nwhpas (mgr.14227) 583 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:07 vm01 bash[28222]: cluster 2026-04-16T19:31:06.708587+0000 mgr.vm01.nwhpas (mgr.14227) 583 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:07 vm01 bash[28222]: audit 2026-04-16T19:31:07.576277+0000 mon.vm01 (mon.0) 1127 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 
2026-04-16T19:31:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:07 vm01 bash[28222]: audit 2026-04-16T19:31:07.576277+0000 mon.vm01 (mon.0) 1127 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:10.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:09 vm04 bash[34817]: cluster 2026-04-16T19:31:08.708997+0000 mgr.vm01.nwhpas (mgr.14227) 584 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:10.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:09 vm04 bash[34817]: cluster 2026-04-16T19:31:08.708997+0000 mgr.vm01.nwhpas (mgr.14227) 584 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:10.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:09 vm01 bash[28222]: cluster 2026-04-16T19:31:08.708997+0000 mgr.vm01.nwhpas (mgr.14227) 584 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:10.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:09 vm01 bash[28222]: cluster 2026-04-16T19:31:08.708997+0000 mgr.vm01.nwhpas (mgr.14227) 584 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:11.397 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:31:11.578 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:31:11.579 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 113s ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:31:11.579 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (118s) 113s ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:31:11.579 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 97s ago 8m - - 2026-04-16T19:31:11.579 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 97s ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:31:11.813 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:31:11.813 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:31:11.813 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:31:12.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:11 vm04 bash[34817]: cluster 2026-04-16T19:31:10.709428+0000 mgr.vm01.nwhpas (mgr.14227) 585 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:12.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:11 vm04 bash[34817]: cluster 2026-04-16T19:31:10.709428+0000 mgr.vm01.nwhpas (mgr.14227) 585 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:12.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:11 vm04 bash[34817]: audit 2026-04-16T19:31:11.808649+0000 mon.vm01 (mon.0) 1128 : audit [DBG] 
from='client.? 192.168.123.101:0/2000126589' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:12.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:11 vm04 bash[34817]: audit 2026-04-16T19:31:11.808649+0000 mon.vm01 (mon.0) 1128 : audit [DBG] from='client.? 192.168.123.101:0/2000126589' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:12.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:11 vm01 bash[28222]: cluster 2026-04-16T19:31:10.709428+0000 mgr.vm01.nwhpas (mgr.14227) 585 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:12.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:11 vm01 bash[28222]: cluster 2026-04-16T19:31:10.709428+0000 mgr.vm01.nwhpas (mgr.14227) 585 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:12.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:11 vm01 bash[28222]: audit 2026-04-16T19:31:11.808649+0000 mon.vm01 (mon.0) 1128 : audit [DBG] from='client.? 192.168.123.101:0/2000126589' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:12.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:11 vm01 bash[28222]: audit 2026-04-16T19:31:11.808649+0000 mon.vm01 (mon.0) 1128 : audit [DBG] from='client.? 192.168.123.101:0/2000126589' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:12 vm04 bash[34817]: audit 2026-04-16T19:31:11.375833+0000 mgr.vm01.nwhpas (mgr.14227) 586 : audit [DBG] from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:12 vm04 bash[34817]: audit 2026-04-16T19:31:11.375833+0000 mgr.vm01.nwhpas (mgr.14227) 586 : audit [DBG] from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:12 vm04 bash[34817]: audit 2026-04-16T19:31:11.571822+0000 mgr.vm01.nwhpas (mgr.14227) 587 : audit [DBG] from='client.15764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:12 vm04 bash[34817]: audit 2026-04-16T19:31:11.571822+0000 mgr.vm01.nwhpas (mgr.14227) 587 : audit [DBG] from='client.15764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:12 vm01 bash[28222]: audit 2026-04-16T19:31:11.375833+0000 mgr.vm01.nwhpas (mgr.14227) 586 : audit [DBG] from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:12 vm01 bash[28222]: audit 2026-04-16T19:31:11.375833+0000 mgr.vm01.nwhpas (mgr.14227) 586 : audit [DBG] from='client.15760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:12 vm01 bash[28222]: audit 2026-04-16T19:31:11.571822+0000 mgr.vm01.nwhpas (mgr.14227) 587 : audit 
[DBG] from='client.15764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:13.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:12 vm01 bash[28222]: audit 2026-04-16T19:31:11.571822+0000 mgr.vm01.nwhpas (mgr.14227) 587 : audit [DBG] from='client.15764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:13 vm04 bash[34817]: cluster 2026-04-16T19:31:12.709914+0000 mgr.vm01.nwhpas (mgr.14227) 588 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:14.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:13 vm04 bash[34817]: cluster 2026-04-16T19:31:12.709914+0000 mgr.vm01.nwhpas (mgr.14227) 588 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:13 vm01 bash[28222]: cluster 2026-04-16T19:31:12.709914+0000 mgr.vm01.nwhpas (mgr.14227) 588 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:14.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:13 vm01 bash[28222]: cluster 2026-04-16T19:31:12.709914+0000 mgr.vm01.nwhpas (mgr.14227) 588 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:31:16.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:15 vm04 bash[34817]: cluster 2026-04-16T19:31:14.710315+0000 mgr.vm01.nwhpas (mgr.14227) 589 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:16.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:15 vm04 bash[34817]: cluster 2026-04-16T19:31:14.710315+0000 mgr.vm01.nwhpas (mgr.14227) 589 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:16.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:15 vm01 bash[28222]: cluster 2026-04-16T19:31:14.710315+0000 mgr.vm01.nwhpas (mgr.14227) 589 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:16.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:15 vm01 bash[28222]: cluster 2026-04-16T19:31:14.710315+0000 mgr.vm01.nwhpas (mgr.14227) 589 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:17.030 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:31:17.216 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:31:17.216 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 118s ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:31:17.216 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 118s ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:31:17.216 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 102s ago 8m - - 
2026-04-16T19:31:17.216 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 102s ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:31:17.449 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:31:17.449 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:31:17.450 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:31:18.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:17 vm04 bash[34817]: cluster 2026-04-16T19:31:16.710803+0000 mgr.vm01.nwhpas (mgr.14227) 590 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:18.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:17 vm04 bash[34817]: cluster 2026-04-16T19:31:16.710803+0000 mgr.vm01.nwhpas (mgr.14227) 590 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:18.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:17 vm04 bash[34817]: audit 2026-04-16T19:31:17.445240+0000 mon.vm01 (mon.0) 1129 : audit [DBG] from='client.? 192.168.123.101:0/3747253741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:18.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:17 vm04 bash[34817]: audit 2026-04-16T19:31:17.445240+0000 mon.vm01 (mon.0) 1129 : audit [DBG] from='client.? 192.168.123.101:0/3747253741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:18.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:17 vm01 bash[28222]: cluster 2026-04-16T19:31:16.710803+0000 mgr.vm01.nwhpas (mgr.14227) 590 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:18.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:17 vm01 bash[28222]: cluster 2026-04-16T19:31:16.710803+0000 mgr.vm01.nwhpas (mgr.14227) 590 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:31:18.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:17 vm01 bash[28222]: audit 2026-04-16T19:31:17.445240+0000 mon.vm01 (mon.0) 1129 : audit [DBG] from='client.? 192.168.123.101:0/3747253741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:18.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:17 vm01 bash[28222]: audit 2026-04-16T19:31:17.445240+0000 mon.vm01 (mon.0) 1129 : audit [DBG] from='client.? 
192.168.123.101:0/3747253741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:31:19.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:18 vm04 bash[34817]: audit 2026-04-16T19:31:17.009310+0000 mgr.vm01.nwhpas (mgr.14227) 591 : audit [DBG] from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:18 vm04 bash[34817]: audit 2026-04-16T19:31:17.009310+0000 mgr.vm01.nwhpas (mgr.14227) 591 : audit [DBG] from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:18 vm04 bash[34817]: audit 2026-04-16T19:31:17.208841+0000 mgr.vm01.nwhpas (mgr.14227) 592 : audit [DBG] from='client.15776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:18 vm04 bash[34817]: audit 2026-04-16T19:31:17.208841+0000 mgr.vm01.nwhpas (mgr.14227) 592 : audit [DBG] from='client.15776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:18 vm01 bash[28222]: audit 2026-04-16T19:31:17.009310+0000 mgr.vm01.nwhpas (mgr.14227) 591 : audit [DBG] from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:18 vm01 bash[28222]: audit 2026-04-16T19:31:17.009310+0000 mgr.vm01.nwhpas (mgr.14227) 591 : audit [DBG] from='client.15772 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:18 vm01 bash[28222]: audit 2026-04-16T19:31:17.208841+0000 mgr.vm01.nwhpas (mgr.14227) 592 : audit [DBG] from='client.15776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:19.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:18 vm01 bash[28222]: audit 2026-04-16T19:31:17.208841+0000 mgr.vm01.nwhpas (mgr.14227) 592 : audit [DBG] from='client.15776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:20.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:19 vm04 bash[34817]: cluster 2026-04-16T19:31:18.711182+0000 mgr.vm01.nwhpas (mgr.14227) 593 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:20.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:19 vm04 bash[34817]: cluster 2026-04-16T19:31:18.711182+0000 mgr.vm01.nwhpas (mgr.14227) 593 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:20.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:19 vm01 bash[28222]: cluster 2026-04-16T19:31:18.711182+0000 mgr.vm01.nwhpas (mgr.14227) 593 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:20.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:19 
vm01 bash[28222]: cluster 2026-04-16T19:31:18.711182+0000 mgr.vm01.nwhpas (mgr.14227) 593 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:22.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:21 vm04 bash[34817]: cluster 2026-04-16T19:31:20.711582+0000 mgr.vm01.nwhpas (mgr.14227) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:22.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:21 vm04 bash[34817]: cluster 2026-04-16T19:31:20.711582+0000 mgr.vm01.nwhpas (mgr.14227) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:22.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:21 vm01 bash[28222]: cluster 2026-04-16T19:31:20.711582+0000 mgr.vm01.nwhpas (mgr.14227) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:22.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:21 vm01 bash[28222]: cluster 2026-04-16T19:31:20.711582+0000 mgr.vm01.nwhpas (mgr.14227) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:22.643 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:31:22.819 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:31:22.819 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:31:22.819 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:31:22.819 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 108s ago 8m - - 2026-04-16T19:31:22.819 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 108s ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:31:23.040 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:31:23.040 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:31:23.041 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: audit 2026-04-16T19:31:22.576662+0000 mon.vm01 (mon.0) 1130 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: audit 2026-04-16T19:31:22.576662+0000 mon.vm01 (mon.0) 1130 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: audit 2026-04-16T19:31:22.624213+0000 mgr.vm01.nwhpas (mgr.14227) 595 : audit [DBG] from='client.15784 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: audit 2026-04-16T19:31:22.624213+0000 mgr.vm01.nwhpas (mgr.14227) 595 : audit [DBG] from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: cluster 2026-04-16T19:31:22.711924+0000 mgr.vm01.nwhpas (mgr.14227) 596 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: cluster 2026-04-16T19:31:22.711924+0000 mgr.vm01.nwhpas (mgr.14227) 596 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: audit 2026-04-16T19:31:22.812418+0000 mgr.vm01.nwhpas (mgr.14227) 597 : audit [DBG] from='client.15788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:23.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:22 vm04 bash[34817]: audit 2026-04-16T19:31:22.812418+0000 mgr.vm01.nwhpas (mgr.14227) 597 : audit [DBG] from='client.15788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 bash[28222]: audit 2026-04-16T19:31:22.576662+0000 mon.vm01 (mon.0) 1130 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 bash[28222]: audit 2026-04-16T19:31:22.576662+0000 mon.vm01 (mon.0) 1130 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 bash[28222]: audit 2026-04-16T19:31:22.624213+0000 mgr.vm01.nwhpas (mgr.14227) 595 : audit [DBG] from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 bash[28222]: audit 2026-04-16T19:31:22.624213+0000 mgr.vm01.nwhpas (mgr.14227) 595 : audit [DBG] from='client.15784 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 bash[28222]: cluster 2026-04-16T19:31:22.711924+0000 mgr.vm01.nwhpas (mgr.14227) 596 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 bash[28222]: cluster 2026-04-16T19:31:22.711924+0000 mgr.vm01.nwhpas (mgr.14227) 596 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:31:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:22 vm01 
bash[28222]: audit 2026-04-16T19:31:22.812418+0000 mgr.vm01.nwhpas (mgr.14227) 597 : audit [DBG] from='client.15788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:24.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:23 vm04 bash[34817]: audit 2026-04-16T19:31:23.036443+0000 mon.vm01 (mon.0) 1131 : audit [DBG] from='client.? 192.168.123.101:0/3544771900' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:24.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:23 vm01 bash[28222]: audit 2026-04-16T19:31:23.036443+0000 mon.vm01 (mon.0) 1131 : audit [DBG] from='client.? 192.168.123.101:0/3544771900' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:25.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:24 vm04 bash[34817]: cluster 2026-04-16T19:31:24.712398+0000 mgr.vm01.nwhpas (mgr.14227) 598 : cluster [DBG] pgmap v324: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:25.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:24 vm01 bash[28222]: cluster 2026-04-16T19:31:24.712398+0000 mgr.vm01.nwhpas (mgr.14227) 598 : cluster [DBG] pgmap v324: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:28.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:27 vm04 bash[34817]: cluster 2026-04-16T19:31:26.712790+0000 mgr.vm01.nwhpas (mgr.14227) 599 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:28.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:27 vm01 bash[28222]: cluster 2026-04-16T19:31:26.712790+0000 mgr.vm01.nwhpas (mgr.14227) 599 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:28.247 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:31:28.426 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:31:28.426 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:31:28.426 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:31:28.426 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 114s ago 8m - -
2026-04-16T19:31:28.426 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 114s ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:31:28.662 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:31:28.662 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:31:28.662 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:31:29.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:28 vm04 bash[34817]: audit 2026-04-16T19:31:28.658065+0000 mon.vm01 (mon.0) 1132 : audit [DBG] from='client.? 192.168.123.101:0/2648005873' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:29.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:28 vm01 bash[28222]: audit 2026-04-16T19:31:28.658065+0000 mon.vm01 (mon.0) 1132 : audit [DBG] from='client.? 192.168.123.101:0/2648005873' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:30.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:29 vm04 bash[34817]: audit 2026-04-16T19:31:28.225755+0000 mgr.vm01.nwhpas (mgr.14227) 600 : audit [DBG] from='client.25143 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:30.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:29 vm04 bash[34817]: audit 2026-04-16T19:31:28.419753+0000 mgr.vm01.nwhpas (mgr.14227) 601 : audit [DBG] from='client.15800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:30.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:29 vm04 bash[34817]: cluster 2026-04-16T19:31:28.713138+0000 mgr.vm01.nwhpas (mgr.14227) 602 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:30.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:29 vm01 bash[28222]: audit 2026-04-16T19:31:28.225755+0000 mgr.vm01.nwhpas (mgr.14227) 600 : audit [DBG] from='client.25143 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:30.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:29 vm01 bash[28222]: audit 2026-04-16T19:31:28.419753+0000 mgr.vm01.nwhpas (mgr.14227) 601 : audit [DBG] from='client.15800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:30.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:29 vm01 bash[28222]: cluster 2026-04-16T19:31:28.713138+0000 mgr.vm01.nwhpas (mgr.14227) 602 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:32.209 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:31 vm04 bash[34817]: cluster 2026-04-16T19:31:30.713539+0000 mgr.vm01.nwhpas (mgr.14227) 603 : cluster [DBG] pgmap v327: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:31 vm01 bash[28222]: cluster 2026-04-16T19:31:30.713539+0000 mgr.vm01.nwhpas (mgr.14227) 603 : cluster [DBG] pgmap v327: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:33.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:32 vm04 bash[34817]: cluster 2026-04-16T19:31:32.713918+0000 mgr.vm01.nwhpas (mgr.14227) 604 : cluster [DBG] pgmap v328: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:33.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:32 vm01 bash[28222]: cluster 2026-04-16T19:31:32.713918+0000 mgr.vm01.nwhpas (mgr.14227) 604 : cluster [DBG] pgmap v328: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:33.879 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:31:34.082 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:31:34.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:31:34.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:31:34.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 119s ago 8m - -
2026-04-16T19:31:34.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 119s ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:31:34.311 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:31:34.311 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:31:34.311 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:31:34.459 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:33 vm04 bash[34817]: audit 2026-04-16T19:31:33.859998+0000 mgr.vm01.nwhpas (mgr.14227) 605 : audit [DBG] from='client.15808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:34.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:33 vm01 bash[28222]: audit 2026-04-16T19:31:33.859998+0000 mgr.vm01.nwhpas (mgr.14227) 605 : audit [DBG] from='client.15808 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:35.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:35 vm04 bash[34817]: audit 2026-04-16T19:31:34.074685+0000 mgr.vm01.nwhpas (mgr.14227) 606 : audit [DBG] from='client.15812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:35.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:35 vm04 bash[34817]: audit 2026-04-16T19:31:34.306737+0000 mon.vm01 (mon.0) 1133 : audit [DBG] from='client.? 192.168.123.101:0/3010176088' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:35.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:35 vm04 bash[34817]: cluster 2026-04-16T19:31:34.714405+0000 mgr.vm01.nwhpas (mgr.14227) 607 : cluster [DBG] pgmap v329: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:35.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:35 vm04 bash[34817]: audit 2026-04-16T19:31:34.722619+0000 mon.vm01 (mon.0) 1134 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:31:35.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:35 vm01 bash[28222]: audit 2026-04-16T19:31:34.074685+0000 mgr.vm01.nwhpas (mgr.14227) 606 : audit [DBG] from='client.15812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:35.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:35 vm01 bash[28222]: audit 2026-04-16T19:31:34.306737+0000 mon.vm01 (mon.0) 1133 : audit [DBG] from='client.? 192.168.123.101:0/3010176088' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:35.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:35 vm01 bash[28222]: cluster 2026-04-16T19:31:34.714405+0000 mgr.vm01.nwhpas (mgr.14227) 607 : cluster [DBG] pgmap v329: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:35.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:35 vm01 bash[28222]: audit 2026-04-16T19:31:34.722619+0000 mon.vm01 (mon.0) 1134 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: audit 2026-04-16T19:31:35.028768+0000 mon.vm01 (mon.0) 1135 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: audit 2026-04-16T19:31:35.034127+0000 mon.vm01 (mon.0) 1136 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: audit 2026-04-16T19:31:35.365791+0000 mon.vm01 (mon.0) 1137 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: audit 2026-04-16T19:31:35.366443+0000 mon.vm01 (mon.0) 1138 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: cluster 2026-04-16T19:31:35.367625+0000 mgr.vm01.nwhpas (mgr.14227) 608 : cluster [DBG] pgmap v330: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: audit 2026-04-16T19:31:35.371358+0000 mon.vm01 (mon.0) 1139 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:31:36.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:36 vm04 bash[34817]: audit 2026-04-16T19:31:35.372898+0000 mon.vm01 (mon.0) 1140 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: audit 2026-04-16T19:31:35.028768+0000 mon.vm01 (mon.0) 1135 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: audit 2026-04-16T19:31:35.034127+0000 mon.vm01 (mon.0) 1136 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: audit 2026-04-16T19:31:35.365791+0000 mon.vm01 (mon.0) 1137 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: audit 2026-04-16T19:31:35.366443+0000 mon.vm01 (mon.0) 1138 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: cluster 2026-04-16T19:31:35.367625+0000 mgr.vm01.nwhpas (mgr.14227) 608 : cluster [DBG] pgmap v330: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: audit 2026-04-16T19:31:35.371358+0000 mon.vm01 (mon.0) 1139 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:31:36.462 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:36 vm01 bash[28222]: audit 2026-04-16T19:31:35.372898+0000 mon.vm01 (mon.0) 1140 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:31:38.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:38 vm04 bash[34817]: cluster 2026-04-16T19:31:37.367993+0000 mgr.vm01.nwhpas (mgr.14227) 609 : cluster [DBG] pgmap v331: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:38.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:38 vm04 bash[34817]: audit 2026-04-16T19:31:37.576898+0000 mon.vm01 (mon.0) 1141 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:31:38.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:38 vm01 bash[28222]: cluster 2026-04-16T19:31:37.367993+0000 mgr.vm01.nwhpas (mgr.14227) 609 : cluster [DBG] pgmap v331: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:38.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:38 vm01 bash[28222]: audit 2026-04-16T19:31:37.576898+0000 mon.vm01 (mon.0) 1141 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:31:39.556 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:31:39.752 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:31:39.752 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:31:39.752 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:31:39.752 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 8m - -
2026-04-16T19:31:39.752 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 2m ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:31:40.022 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:31:40.022 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:31:40.022 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:31:40.709 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:40 vm04 bash[34817]: cluster 2026-04-16T19:31:39.368482+0000 mgr.vm01.nwhpas (mgr.14227) 610 : cluster [DBG] pgmap v332: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:31:40.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:40 vm04 bash[34817]: audit 2026-04-16T19:31:39.529713+0000 mgr.vm01.nwhpas (mgr.14227) 611 : audit [DBG] from='client.15820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:40.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:40 vm04 bash[34817]: audit 2026-04-16T19:31:39.745869+0000 mgr.vm01.nwhpas (mgr.14227) 612 : audit [DBG] from='client.15824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:40.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:40 vm04 bash[34817]: audit 2026-04-16T19:31:40.017846+0000 mon.vm01 (mon.0) 1142 : audit [DBG] from='client.? 192.168.123.101:0/3929424523' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:40.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:40 vm01 bash[28222]: cluster 2026-04-16T19:31:39.368482+0000 mgr.vm01.nwhpas (mgr.14227) 610 : cluster [DBG] pgmap v332: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:31:40.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:40 vm01 bash[28222]: audit 2026-04-16T19:31:39.529713+0000 mgr.vm01.nwhpas (mgr.14227) 611 : audit [DBG] from='client.15820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:40.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:40 vm01 bash[28222]: audit 2026-04-16T19:31:39.745869+0000 mgr.vm01.nwhpas (mgr.14227) 612 : audit [DBG] from='client.15824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:40.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:40 vm01 bash[28222]: audit 2026-04-16T19:31:40.017846+0000 mon.vm01 (mon.0) 1142 : audit [DBG] from='client.? 192.168.123.101:0/3929424523' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:42.959 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:42 vm04 bash[34817]: cluster 2026-04-16T19:31:41.368889+0000 mgr.vm01.nwhpas (mgr.14227) 613 : cluster [DBG] pgmap v333: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:31:42.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:42 vm01 bash[28222]: cluster 2026-04-16T19:31:41.368889+0000 mgr.vm01.nwhpas (mgr.14227) 613 : cluster [DBG] pgmap v333: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:31:44.960 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:44 vm04 bash[34817]: cluster 2026-04-16T19:31:43.369338+0000 mgr.vm01.nwhpas (mgr.14227) 614 : cluster [DBG] pgmap v334: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:44.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:44 vm01 bash[28222]: cluster 2026-04-16T19:31:43.369338+0000 mgr.vm01.nwhpas (mgr.14227) 614 : cluster [DBG] pgmap v334: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:45.215 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:31:45.398 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:31:45.398 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:31:45.398 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:31:45.398 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 8m - -
2026-04-16T19:31:45.398 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 2m ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:31:45.639 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:31:45.639 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:31:45.639 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:31:46.847 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:46 vm04 bash[34817]: audit 2026-04-16T19:31:45.195781+0000 mgr.vm01.nwhpas (mgr.14227) 615 : audit [DBG] from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:46.847 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:46 vm04 bash[34817]: cluster 2026-04-16T19:31:45.369773+0000 mgr.vm01.nwhpas (mgr.14227) 616 : cluster [DBG] pgmap v335: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:46.847 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:46 vm04 bash[34817]: audit 2026-04-16T19:31:45.391647+0000 mgr.vm01.nwhpas (mgr.14227) 617 : audit [DBG] from='client.15836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:46.847 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:46 vm04 bash[34817]: audit 2026-04-16T19:31:45.634773+0000 mon.vm01 (mon.0) 1143 : audit [DBG] from='client.? 192.168.123.101:0/814654686' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:46.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:46 vm01 bash[28222]: audit 2026-04-16T19:31:45.195781+0000 mgr.vm01.nwhpas (mgr.14227) 615 : audit [DBG] from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:46.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:46 vm01 bash[28222]: cluster 2026-04-16T19:31:45.369773+0000 mgr.vm01.nwhpas (mgr.14227) 616 : cluster [DBG] pgmap v335: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 B/s rd, 192 B/s wr, 0 op/s
2026-04-16T19:31:46.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:46 vm01 bash[28222]: audit 2026-04-16T19:31:45.391647+0000 mgr.vm01.nwhpas (mgr.14227) 617 : audit [DBG] from='client.15836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:46.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:46 vm01 bash[28222]: audit 2026-04-16T19:31:45.634773+0000 mon.vm01 (mon.0) 1143 : audit [DBG] from='client.? 192.168.123.101:0/814654686' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:48.960 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:48 vm04 bash[34817]: cluster 2026-04-16T19:31:47.370153+0000 mgr.vm01.nwhpas (mgr.14227) 618 : cluster [DBG] pgmap v336: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:48.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:48 vm01 bash[28222]: cluster 2026-04-16T19:31:47.370153+0000 mgr.vm01.nwhpas (mgr.14227) 618 : cluster [DBG] pgmap v336: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:50.845 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:31:50.960 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:50 vm04 bash[34817]: cluster 2026-04-16T19:31:49.370614+0000 mgr.vm01.nwhpas (mgr.14227) 619 : cluster [DBG] pgmap v337: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:50.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:50 vm01 bash[28222]: cluster 2026-04-16T19:31:49.370614+0000 mgr.vm01.nwhpas (mgr.14227) 619 : cluster [DBG] pgmap v337: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:51.032 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:31:51.034 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (7m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:31:51.034 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:31:51.034 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 8m - -
2026-04-16T19:31:51.034 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 2m ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:31:51.272 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:31:51.272 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:31:51.272 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:31:51.960 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:51 vm04 bash[34817]: audit 2026-04-16T19:31:50.819621+0000 mgr.vm01.nwhpas (mgr.14227) 620 : audit [DBG] from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:51.960 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:51 vm04 bash[34817]: audit 2026-04-16T19:31:51.267645+0000 mon.vm01 (mon.0) 1144 : audit [DBG] from='client.? 192.168.123.101:0/1792878113' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:51 vm01 bash[28222]: audit 2026-04-16T19:31:50.819621+0000 mgr.vm01.nwhpas (mgr.14227) 620 : audit [DBG] from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:51.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:51 vm01 bash[28222]: audit 2026-04-16T19:31:51.267645+0000 mon.vm01 (mon.0) 1144 : audit [DBG] from='client.? 192.168.123.101:0/1792878113' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:52.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:52 vm01 bash[28222]: audit 2026-04-16T19:31:51.024665+0000 mgr.vm01.nwhpas (mgr.14227) 621 : audit [DBG] from='client.15848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:52.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:52 vm01 bash[28222]: cluster 2026-04-16T19:31:51.370996+0000 mgr.vm01.nwhpas (mgr.14227) 622 : cluster [DBG] pgmap v338: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:52.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:52 vm01 bash[28222]: audit 2026-04-16T19:31:52.577185+0000 mon.vm01 (mon.0) 1145 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:31:53.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:52 vm04 bash[34817]: audit 2026-04-16T19:31:51.024665+0000 mgr.vm01.nwhpas (mgr.14227) 621 : audit [DBG] from='client.15848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:53.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:52 vm04 bash[34817]: cluster 2026-04-16T19:31:51.370996+0000 mgr.vm01.nwhpas (mgr.14227) 622 : cluster [DBG] pgmap v338: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:53.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:52 vm04 bash[34817]: audit 2026-04-16T19:31:52.577185+0000 mon.vm01 (mon.0) 1145 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:31:55.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:54 vm04 bash[34817]: cluster 2026-04-16T19:31:53.371364+0000 mgr.vm01.nwhpas (mgr.14227) 623 : cluster [DBG] pgmap v339: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:54 vm01 bash[28222]: cluster 2026-04-16T19:31:53.371364+0000 mgr.vm01.nwhpas (mgr.14227) 623 : cluster [DBG] pgmap v339: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:31:56.483 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:31:56.676 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:31:56.677 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:31:56.677 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:31:56.677 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 8m - -
2026-04-16T19:31:56.677 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 2m ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:31:56.923 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:31:56.923 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:31:56.923 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:31:56.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:56 vm01 bash[28222]: cluster 2026-04-16T19:31:55.371901+0000 mgr.vm01.nwhpas (mgr.14227) 624 : cluster [DBG] pgmap v340: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:57.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:56 vm04 bash[34817]: cluster 2026-04-16T19:31:55.371901+0000 mgr.vm01.nwhpas (mgr.14227) 624 : cluster [DBG] pgmap v340: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:58.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:57 vm04 bash[34817]: audit 2026-04-16T19:31:56.458564+0000 mgr.vm01.nwhpas (mgr.14227) 625 : audit [DBG] from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:58.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:57 vm04 bash[34817]: audit 2026-04-16T19:31:56.669779+0000 mgr.vm01.nwhpas (mgr.14227) 626 : audit [DBG] from='client.15860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:58.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:57 vm04 bash[34817]: audit 2026-04-16T19:31:56.918655+0000 mon.vm01 (mon.0) 1146 : audit [DBG] from='client.? 192.168.123.101:0/535210788' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:58.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:57 vm01 bash[28222]: audit 2026-04-16T19:31:56.458564+0000 mgr.vm01.nwhpas (mgr.14227) 625 : audit [DBG] from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:58.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:57 vm01 bash[28222]: audit 2026-04-16T19:31:56.669779+0000 mgr.vm01.nwhpas (mgr.14227) 626 : audit [DBG] from='client.15860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:31:58.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:57 vm01 bash[28222]: audit 2026-04-16T19:31:56.918655+0000 mon.vm01 (mon.0) 1146 : audit [DBG] from='client.? 192.168.123.101:0/535210788' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:31:59.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:31:58 vm04 bash[34817]: cluster 2026-04-16T19:31:57.372260+0000 mgr.vm01.nwhpas (mgr.14227) 627 : cluster [DBG] pgmap v341: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:31:59.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:31:58 vm01 bash[28222]: cluster 2026-04-16T19:31:57.372260+0000 mgr.vm01.nwhpas (mgr.14227) 627 : cluster [DBG] pgmap v341: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:01.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:00 vm04 bash[34817]: cluster 2026-04-16T19:31:59.372688+0000 mgr.vm01.nwhpas (mgr.14227) 628 : cluster [DBG] pgmap v342: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:01.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:00 vm01 bash[28222]: cluster 2026-04-16T19:31:59.372688+0000 mgr.vm01.nwhpas (mgr.14227) 628 : cluster [DBG] pgmap v342: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:02.148 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:32:02.352 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:32:02.352 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 2m ago 8m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:32:02.352 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 8m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:32:02.352 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 8m - -
2026-04-16T19:32:02.352 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (8m) 2m ago 8m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:32:02.595 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:32:02.595 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:32:02.595 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:32:03.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:02 vm04 bash[34817]: cluster 2026-04-16T19:32:01.373129+0000 mgr.vm01.nwhpas (mgr.14227) 629 : cluster [DBG] pgmap v343: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:03.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:02 vm04 bash[34817]: audit 2026-04-16T19:32:02.591057+0000 mon.vm01 (mon.0) 1147 : audit [DBG] from='client.? 192.168.123.101:0/3752931621' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:03.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:02 vm01 bash[28222]: cluster 2026-04-16T19:32:01.373129+0000 mgr.vm01.nwhpas (mgr.14227) 629 : cluster [DBG] pgmap v343: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:03.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:02 vm01 bash[28222]: audit 2026-04-16T19:32:02.591057+0000 mon.vm01 (mon.0) 1147 : audit [DBG] from='client.? 192.168.123.101:0/3752931621' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:03.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:02 vm01 bash[28222]: audit 2026-04-16T19:32:02.591057+0000 mon.vm01 (mon.0) 1147 : audit [DBG] from='client.? 
192.168.123.101:0/3752931621' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:04.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:03 vm04 bash[34817]: audit 2026-04-16T19:32:02.126173+0000 mgr.vm01.nwhpas (mgr.14227) 630 : audit [DBG] from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:03 vm04 bash[34817]: audit 2026-04-16T19:32:02.126173+0000 mgr.vm01.nwhpas (mgr.14227) 630 : audit [DBG] from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:03 vm04 bash[34817]: audit 2026-04-16T19:32:02.345209+0000 mgr.vm01.nwhpas (mgr.14227) 631 : audit [DBG] from='client.15872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:03 vm04 bash[34817]: audit 2026-04-16T19:32:02.345209+0000 mgr.vm01.nwhpas (mgr.14227) 631 : audit [DBG] from='client.15872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:03 vm01 bash[28222]: audit 2026-04-16T19:32:02.126173+0000 mgr.vm01.nwhpas (mgr.14227) 630 : audit [DBG] from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:03 vm01 bash[28222]: audit 2026-04-16T19:32:02.126173+0000 mgr.vm01.nwhpas (mgr.14227) 630 : audit [DBG] from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:03 vm01 bash[28222]: audit 2026-04-16T19:32:02.345209+0000 mgr.vm01.nwhpas (mgr.14227) 631 : audit [DBG] from='client.15872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:03 vm01 bash[28222]: audit 2026-04-16T19:32:02.345209+0000 mgr.vm01.nwhpas (mgr.14227) 631 : audit [DBG] from='client.15872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:05.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:04 vm04 bash[34817]: cluster 2026-04-16T19:32:03.373536+0000 mgr.vm01.nwhpas (mgr.14227) 632 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:05.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:04 vm04 bash[34817]: cluster 2026-04-16T19:32:03.373536+0000 mgr.vm01.nwhpas (mgr.14227) 632 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:05.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:04 vm01 bash[28222]: cluster 2026-04-16T19:32:03.373536+0000 mgr.vm01.nwhpas (mgr.14227) 632 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:05.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:04 
vm01 bash[28222]: cluster 2026-04-16T19:32:03.373536+0000 mgr.vm01.nwhpas (mgr.14227) 632 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:06 vm01 bash[28222]: cluster 2026-04-16T19:32:05.374205+0000 mgr.vm01.nwhpas (mgr.14227) 633 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:06 vm01 bash[28222]: cluster 2026-04-16T19:32:05.374205+0000 mgr.vm01.nwhpas (mgr.14227) 633 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:07.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:06 vm04 bash[34817]: cluster 2026-04-16T19:32:05.374205+0000 mgr.vm01.nwhpas (mgr.14227) 633 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:07.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:06 vm04 bash[34817]: cluster 2026-04-16T19:32:05.374205+0000 mgr.vm01.nwhpas (mgr.14227) 633 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:07.831 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:32:08.054 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:32:08.054 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 2m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:32:08.054 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (2m) 2m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:32:08.054 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 9m - - 2026-04-16T19:32:08.054 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 2m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:32:08.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:07 vm04 bash[34817]: audit 2026-04-16T19:32:07.577122+0000 mon.vm01 (mon.0) 1148 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:08.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:07 vm04 bash[34817]: audit 2026-04-16T19:32:07.577122+0000 mon.vm01 (mon.0) 1148 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:07 vm01 bash[28222]: audit 2026-04-16T19:32:07.577122+0000 mon.vm01 (mon.0) 1148 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:07 vm01 bash[28222]: audit 2026-04-16T19:32:07.577122+0000 mon.vm01 (mon.0) 1148 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:08.315 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:32:08.315 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:32:08.315 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:32:09.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:08 vm04 bash[34817]: cluster 2026-04-16T19:32:07.374844+0000 mgr.vm01.nwhpas (mgr.14227) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:09.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:08 vm04 bash[34817]: cluster 2026-04-16T19:32:07.374844+0000 mgr.vm01.nwhpas (mgr.14227) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:09.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:08 vm04 bash[34817]: audit 2026-04-16T19:32:07.806763+0000 mgr.vm01.nwhpas (mgr.14227) 635 : audit [DBG] from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:09.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:08 vm04 bash[34817]: audit 2026-04-16T19:32:07.806763+0000 mgr.vm01.nwhpas (mgr.14227) 635 : audit [DBG] from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:09.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:08 vm04 bash[34817]: audit 2026-04-16T19:32:08.310233+0000 mon.vm01 (mon.0) 1149 : audit [DBG] from='client.? 192.168.123.101:0/1977393402' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:09.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:08 vm04 bash[34817]: audit 2026-04-16T19:32:08.310233+0000 mon.vm01 (mon.0) 1149 : audit [DBG] from='client.? 
192.168.123.101:0/1977393402' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:09.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:08 vm01 bash[28222]: cluster 2026-04-16T19:32:07.374844+0000 mgr.vm01.nwhpas (mgr.14227) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:09.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:08 vm01 bash[28222]: cluster 2026-04-16T19:32:07.374844+0000 mgr.vm01.nwhpas (mgr.14227) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:09.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:08 vm01 bash[28222]: audit 2026-04-16T19:32:07.806763+0000 mgr.vm01.nwhpas (mgr.14227) 635 : audit [DBG] from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:09.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:08 vm01 bash[28222]: audit 2026-04-16T19:32:07.806763+0000 mgr.vm01.nwhpas (mgr.14227) 635 : audit [DBG] from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:09.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:08 vm01 bash[28222]: audit 2026-04-16T19:32:08.310233+0000 mon.vm01 (mon.0) 1149 : audit [DBG] from='client.? 192.168.123.101:0/1977393402' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:09.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:08 vm01 bash[28222]: audit 2026-04-16T19:32:08.310233+0000 mon.vm01 (mon.0) 1149 : audit [DBG] from='client.? 
192.168.123.101:0/1977393402' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:10.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:09 vm04 bash[34817]: audit 2026-04-16T19:32:08.046744+0000 mgr.vm01.nwhpas (mgr.14227) 636 : audit [DBG] from='client.15884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:10.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:09 vm04 bash[34817]: audit 2026-04-16T19:32:08.046744+0000 mgr.vm01.nwhpas (mgr.14227) 636 : audit [DBG] from='client.15884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:10.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:09 vm01 bash[28222]: audit 2026-04-16T19:32:08.046744+0000 mgr.vm01.nwhpas (mgr.14227) 636 : audit [DBG] from='client.15884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:10.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:09 vm01 bash[28222]: audit 2026-04-16T19:32:08.046744+0000 mgr.vm01.nwhpas (mgr.14227) 636 : audit [DBG] from='client.15884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:11.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:10 vm04 bash[34817]: cluster 2026-04-16T19:32:09.375352+0000 mgr.vm01.nwhpas (mgr.14227) 637 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:11.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:10 vm04 bash[34817]: cluster 2026-04-16T19:32:09.375352+0000 mgr.vm01.nwhpas (mgr.14227) 637 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:11.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:10 vm01 bash[28222]: cluster 2026-04-16T19:32:09.375352+0000 mgr.vm01.nwhpas (mgr.14227) 637 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:11.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:10 vm01 bash[28222]: cluster 2026-04-16T19:32:09.375352+0000 mgr.vm01.nwhpas (mgr.14227) 637 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:13.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:12 vm04 bash[34817]: cluster 2026-04-16T19:32:11.375802+0000 mgr.vm01.nwhpas (mgr.14227) 638 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:32:13.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:12 vm04 bash[34817]: cluster 2026-04-16T19:32:11.375802+0000 mgr.vm01.nwhpas (mgr.14227) 638 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:32:13.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:12 vm01 bash[28222]: cluster 2026-04-16T19:32:11.375802+0000 mgr.vm01.nwhpas (mgr.14227) 638 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:32:13.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:12 vm01 bash[28222]: cluster 
2026-04-16T19:32:11.375802+0000 mgr.vm01.nwhpas (mgr.14227) 638 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:32:13.567 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:32:13.764 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:32:13.764 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 2m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:32:13.764 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 2m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:32:13.764 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 9m - - 2026-04-16T19:32:13.764 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 2m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:32:14.029 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:32:14.029 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:32:14.029 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: cluster 2026-04-16T19:32:13.376415+0000 mgr.vm01.nwhpas (mgr.14227) 639 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: cluster 2026-04-16T19:32:13.376415+0000 mgr.vm01.nwhpas (mgr.14227) 639 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: audit 2026-04-16T19:32:13.541409+0000 mgr.vm01.nwhpas (mgr.14227) 640 : audit [DBG] from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: audit 2026-04-16T19:32:13.541409+0000 mgr.vm01.nwhpas (mgr.14227) 640 : audit [DBG] from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: audit 2026-04-16T19:32:13.756294+0000 mgr.vm01.nwhpas (mgr.14227) 641 : audit [DBG] from='client.15896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: audit 2026-04-16T19:32:13.756294+0000 mgr.vm01.nwhpas (mgr.14227) 641 : audit [DBG] from='client.15896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: audit 2026-04-16T19:32:14.024862+0000 mon.vm01 (mon.0) 1150 : audit [DBG] from='client.? 
192.168.123.101:0/1875382231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:15.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:14 vm04 bash[34817]: audit 2026-04-16T19:32:14.024862+0000 mon.vm01 (mon.0) 1150 : audit [DBG] from='client.? 192.168.123.101:0/1875382231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: cluster 2026-04-16T19:32:13.376415+0000 mgr.vm01.nwhpas (mgr.14227) 639 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: cluster 2026-04-16T19:32:13.376415+0000 mgr.vm01.nwhpas (mgr.14227) 639 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: audit 2026-04-16T19:32:13.541409+0000 mgr.vm01.nwhpas (mgr.14227) 640 : audit [DBG] from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: audit 2026-04-16T19:32:13.541409+0000 mgr.vm01.nwhpas (mgr.14227) 640 : audit [DBG] from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: audit 2026-04-16T19:32:13.756294+0000 mgr.vm01.nwhpas (mgr.14227) 641 : audit [DBG] from='client.15896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: audit 2026-04-16T19:32:13.756294+0000 mgr.vm01.nwhpas (mgr.14227) 641 : audit [DBG] from='client.15896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: audit 2026-04-16T19:32:14.024862+0000 mon.vm01 (mon.0) 1150 : audit [DBG] from='client.? 192.168.123.101:0/1875382231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:15.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:14 vm01 bash[28222]: audit 2026-04-16T19:32:14.024862+0000 mon.vm01 (mon.0) 1150 : audit [DBG] from='client.? 
192.168.123.101:0/1875382231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:16.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:16 vm01 bash[28222]: cluster 2026-04-16T19:32:15.377072+0000 mgr.vm01.nwhpas (mgr.14227) 642 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:16.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:16 vm01 bash[28222]: cluster 2026-04-16T19:32:15.377072+0000 mgr.vm01.nwhpas (mgr.14227) 642 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:17.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:16 vm04 bash[34817]: cluster 2026-04-16T19:32:15.377072+0000 mgr.vm01.nwhpas (mgr.14227) 642 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:17.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:16 vm04 bash[34817]: cluster 2026-04-16T19:32:15.377072+0000 mgr.vm01.nwhpas (mgr.14227) 642 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:19.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:18 vm04 bash[34817]: cluster 2026-04-16T19:32:17.377519+0000 mgr.vm01.nwhpas (mgr.14227) 643 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:19.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:18 vm04 bash[34817]: cluster 2026-04-16T19:32:17.377519+0000 mgr.vm01.nwhpas (mgr.14227) 643 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:19.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:18 vm01 bash[28222]: cluster 2026-04-16T19:32:17.377519+0000 mgr.vm01.nwhpas (mgr.14227) 643 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:19.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:18 vm01 bash[28222]: cluster 2026-04-16T19:32:17.377519+0000 mgr.vm01.nwhpas (mgr.14227) 643 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:19.252 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:32:19.431 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:32:19.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:32:19.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:32:19.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 9m - - 2026-04-16T19:32:19.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 2m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:32:19.687 
INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:32:19.687 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:32:19.687 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:32:20.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:19 vm04 bash[34817]: audit 2026-04-16T19:32:19.682313+0000 mon.vm01 (mon.0) 1151 : audit [DBG] from='client.? 192.168.123.101:0/2769672398' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:20.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:19 vm04 bash[34817]: audit 2026-04-16T19:32:19.682313+0000 mon.vm01 (mon.0) 1151 : audit [DBG] from='client.? 192.168.123.101:0/2769672398' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:20.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:19 vm01 bash[28222]: audit 2026-04-16T19:32:19.682313+0000 mon.vm01 (mon.0) 1151 : audit [DBG] from='client.? 192.168.123.101:0/2769672398' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:20.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:19 vm01 bash[28222]: audit 2026-04-16T19:32:19.682313+0000 mon.vm01 (mon.0) 1151 : audit [DBG] from='client.? 192.168.123.101:0/2769672398' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:21.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:20 vm04 bash[34817]: audit 2026-04-16T19:32:19.227803+0000 mgr.vm01.nwhpas (mgr.14227) 644 : audit [DBG] from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:20 vm04 bash[34817]: audit 2026-04-16T19:32:19.227803+0000 mgr.vm01.nwhpas (mgr.14227) 644 : audit [DBG] from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:20 vm04 bash[34817]: cluster 2026-04-16T19:32:19.378015+0000 mgr.vm01.nwhpas (mgr.14227) 645 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:21.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:20 vm04 bash[34817]: cluster 2026-04-16T19:32:19.378015+0000 mgr.vm01.nwhpas (mgr.14227) 645 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:21.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:20 vm04 bash[34817]: audit 2026-04-16T19:32:19.424159+0000 mgr.vm01.nwhpas (mgr.14227) 646 : audit [DBG] from='client.15908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:20 vm04 bash[34817]: audit 2026-04-16T19:32:19.424159+0000 mgr.vm01.nwhpas (mgr.14227) 646 : audit [DBG] from='client.15908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:20 vm01 bash[28222]: audit 2026-04-16T19:32:19.227803+0000 mgr.vm01.nwhpas (mgr.14227) 644 : audit [DBG] from='client.15904 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:20 vm01 bash[28222]: audit 2026-04-16T19:32:19.227803+0000 mgr.vm01.nwhpas (mgr.14227) 644 : audit [DBG] from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:20 vm01 bash[28222]: cluster 2026-04-16T19:32:19.378015+0000 mgr.vm01.nwhpas (mgr.14227) 645 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:20 vm01 bash[28222]: cluster 2026-04-16T19:32:19.378015+0000 mgr.vm01.nwhpas (mgr.14227) 645 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:20 vm01 bash[28222]: audit 2026-04-16T19:32:19.424159+0000 mgr.vm01.nwhpas (mgr.14227) 646 : audit [DBG] from='client.15908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:21.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:20 vm01 bash[28222]: audit 2026-04-16T19:32:19.424159+0000 mgr.vm01.nwhpas (mgr.14227) 646 : audit [DBG] from='client.15908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:23.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:22 vm04 bash[34817]: cluster 2026-04-16T19:32:21.378537+0000 mgr.vm01.nwhpas (mgr.14227) 647 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:23.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:22 vm04 bash[34817]: cluster 2026-04-16T19:32:21.378537+0000 mgr.vm01.nwhpas (mgr.14227) 647 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:23.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:22 vm04 bash[34817]: audit 2026-04-16T19:32:22.577569+0000 mon.vm01 (mon.0) 1152 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:23.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:22 vm04 bash[34817]: audit 2026-04-16T19:32:22.577569+0000 mon.vm01 (mon.0) 1152 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:22 vm01 bash[28222]: cluster 2026-04-16T19:32:21.378537+0000 mgr.vm01.nwhpas (mgr.14227) 647 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:22 vm01 bash[28222]: cluster 2026-04-16T19:32:21.378537+0000 mgr.vm01.nwhpas (mgr.14227) 647 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:23.212 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:22 vm01 bash[28222]: audit 2026-04-16T19:32:22.577569+0000 mon.vm01 (mon.0) 1152 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:22 vm01 bash[28222]: audit 2026-04-16T19:32:22.577569+0000 mon.vm01 (mon.0) 1152 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:32:24.910 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:32:25.112 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:32:25.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:32:25.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:32:25.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 9m - - 2026-04-16T19:32:25.112 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 2m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:32:25.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:24 vm04 bash[34817]: cluster 2026-04-16T19:32:23.379026+0000 mgr.vm01.nwhpas (mgr.14227) 648 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:25.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:24 vm04 bash[34817]: cluster 2026-04-16T19:32:23.379026+0000 mgr.vm01.nwhpas (mgr.14227) 648 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:25.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:24 vm01 bash[28222]: cluster 2026-04-16T19:32:23.379026+0000 mgr.vm01.nwhpas (mgr.14227) 648 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:25.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:24 vm01 bash[28222]: cluster 2026-04-16T19:32:23.379026+0000 mgr.vm01.nwhpas (mgr.14227) 648 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:25.353 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:32:25.353 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:32:25.353 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:32:26.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:25 vm04 bash[34817]: audit 2026-04-16T19:32:24.884652+0000 mgr.vm01.nwhpas (mgr.14227) 649 : audit [DBG] from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:26.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:25 vm04 bash[34817]: audit 2026-04-16T19:32:24.884652+0000 mgr.vm01.nwhpas (mgr.14227) 649 : 
audit [DBG] from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:26.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:25 vm04 bash[34817]: audit 2026-04-16T19:32:25.348385+0000 mon.vm01 (mon.0) 1153 : audit [DBG] from='client.? 192.168.123.101:0/2029118040' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:26.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:25 vm04 bash[34817]: audit 2026-04-16T19:32:25.348385+0000 mon.vm01 (mon.0) 1153 : audit [DBG] from='client.? 192.168.123.101:0/2029118040' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:26.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:25 vm01 bash[28222]: audit 2026-04-16T19:32:24.884652+0000 mgr.vm01.nwhpas (mgr.14227) 649 : audit [DBG] from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:26.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:25 vm01 bash[28222]: audit 2026-04-16T19:32:24.884652+0000 mgr.vm01.nwhpas (mgr.14227) 649 : audit [DBG] from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:26.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:25 vm01 bash[28222]: audit 2026-04-16T19:32:25.348385+0000 mon.vm01 (mon.0) 1153 : audit [DBG] from='client.? 192.168.123.101:0/2029118040' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:26.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:25 vm01 bash[28222]: audit 2026-04-16T19:32:25.348385+0000 mon.vm01 (mon.0) 1153 : audit [DBG] from='client.? 
192.168.123.101:0/2029118040' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:27.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:26 vm04 bash[34817]: audit 2026-04-16T19:32:25.104510+0000 mgr.vm01.nwhpas (mgr.14227) 650 : audit [DBG] from='client.15920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:27.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:26 vm04 bash[34817]: audit 2026-04-16T19:32:25.104510+0000 mgr.vm01.nwhpas (mgr.14227) 650 : audit [DBG] from='client.15920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:27.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:26 vm04 bash[34817]: cluster 2026-04-16T19:32:25.379555+0000 mgr.vm01.nwhpas (mgr.14227) 651 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:27.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:26 vm04 bash[34817]: cluster 2026-04-16T19:32:25.379555+0000 mgr.vm01.nwhpas (mgr.14227) 651 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:27.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:26 vm01 bash[28222]: audit 2026-04-16T19:32:25.104510+0000 mgr.vm01.nwhpas (mgr.14227) 650 : audit [DBG] from='client.15920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:27.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:26 vm01 bash[28222]: audit 2026-04-16T19:32:25.104510+0000 mgr.vm01.nwhpas (mgr.14227) 650 : audit [DBG] from='client.15920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:27.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:26 vm01 bash[28222]: cluster 2026-04-16T19:32:25.379555+0000 mgr.vm01.nwhpas (mgr.14227) 651 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:27.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:26 vm01 bash[28222]: cluster 2026-04-16T19:32:25.379555+0000 mgr.vm01.nwhpas (mgr.14227) 651 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:29.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:28 vm04 bash[34817]: cluster 2026-04-16T19:32:27.380022+0000 mgr.vm01.nwhpas (mgr.14227) 652 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:29.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:28 vm04 bash[34817]: cluster 2026-04-16T19:32:27.380022+0000 mgr.vm01.nwhpas (mgr.14227) 652 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:29.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:28 vm01 bash[28222]: cluster 2026-04-16T19:32:27.380022+0000 mgr.vm01.nwhpas (mgr.14227) 652 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 
2026-04-16T19:32:29.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:28 vm01 bash[28222]: cluster 2026-04-16T19:32:27.380022+0000 mgr.vm01.nwhpas (mgr.14227) 652 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:30.562 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:32:30.768 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:32:30.768 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:32:30.768 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:32:30.768 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 2m ago 9m - - 2026-04-16T19:32:30.768 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 2m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:32:31.010 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:32:31.010 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:32:31.010 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:32:31.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:30 vm04 bash[34817]: cluster 2026-04-16T19:32:29.380690+0000 mgr.vm01.nwhpas (mgr.14227) 653 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:31.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:30 vm04 bash[34817]: cluster 2026-04-16T19:32:29.380690+0000 mgr.vm01.nwhpas (mgr.14227) 653 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:31.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:30 vm01 bash[28222]: cluster 2026-04-16T19:32:29.380690+0000 mgr.vm01.nwhpas (mgr.14227) 653 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:31.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:30 vm01 bash[28222]: cluster 2026-04-16T19:32:29.380690+0000 mgr.vm01.nwhpas (mgr.14227) 653 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: audit 2026-04-16T19:32:30.537345+0000 mgr.vm01.nwhpas (mgr.14227) 654 : audit [DBG] from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: audit 2026-04-16T19:32:30.537345+0000 mgr.vm01.nwhpas (mgr.14227) 654 : audit [DBG] from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: audit 2026-04-16T19:32:30.760945+0000 mgr.vm01.nwhpas 
(mgr.14227) 655 : audit [DBG] from='client.15932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: audit 2026-04-16T19:32:30.760945+0000 mgr.vm01.nwhpas (mgr.14227) 655 : audit [DBG] from='client.15932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: audit 2026-04-16T19:32:31.005731+0000 mon.vm01 (mon.0) 1154 : audit [DBG] from='client.? 192.168.123.101:0/3046672819' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: audit 2026-04-16T19:32:31.005731+0000 mon.vm01 (mon.0) 1154 : audit [DBG] from='client.? 192.168.123.101:0/3046672819' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: cluster 2026-04-16T19:32:31.381199+0000 mgr.vm01.nwhpas (mgr.14227) 656 : cluster [DBG] pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:32.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:31 vm04 bash[34817]: cluster 2026-04-16T19:32:31.381199+0000 mgr.vm01.nwhpas (mgr.14227) 656 : cluster [DBG] pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: audit 2026-04-16T19:32:30.537345+0000 mgr.vm01.nwhpas (mgr.14227) 654 : audit [DBG] from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: audit 2026-04-16T19:32:30.537345+0000 mgr.vm01.nwhpas (mgr.14227) 654 : audit [DBG] from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: audit 2026-04-16T19:32:30.760945+0000 mgr.vm01.nwhpas (mgr.14227) 655 : audit [DBG] from='client.15932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: audit 2026-04-16T19:32:30.760945+0000 mgr.vm01.nwhpas (mgr.14227) 655 : audit [DBG] from='client.15932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: audit 2026-04-16T19:32:31.005731+0000 mon.vm01 (mon.0) 1154 : audit [DBG] from='client.? 192.168.123.101:0/3046672819' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: audit 2026-04-16T19:32:31.005731+0000 mon.vm01 (mon.0) 1154 : audit [DBG] from='client.? 
192.168.123.101:0/3046672819' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:32.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:31 vm01 bash[28222]: cluster 2026-04-16T19:32:31.381199+0000 mgr.vm01.nwhpas (mgr.14227) 656 : cluster [DBG] pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:34.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:34 vm04 bash[34817]: cluster 2026-04-16T19:32:33.381783+0000 mgr.vm01.nwhpas (mgr.14227) 657 : cluster [DBG] pgmap v359: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:34.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:34 vm01 bash[28222]: cluster 2026-04-16T19:32:33.381783+0000 mgr.vm01.nwhpas (mgr.14227) 657 : cluster [DBG] pgmap v359: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:35.960 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:35 vm04 bash[34817]: audit 2026-04-16T19:32:35.388110+0000 mon.vm01 (mon.0) 1155 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:32:35.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:35 vm01 bash[28222]: audit 2026-04-16T19:32:35.388110+0000 mon.vm01 (mon.0) 1155 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:32:36.232 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:32:36.424 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:32:36.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:32:36.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:32:36.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 9m - -
2026-04-16T19:32:36.424 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 3m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:32:36.674 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:32:36.674 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:32:36.674 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:32:36.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: cluster 2026-04-16T19:32:35.382431+0000 mgr.vm01.nwhpas (mgr.14227) 658 : cluster [DBG] pgmap v360: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:35.728604+0000 mon.vm01 (mon.0) 1156 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:35.735300+0000 mon.vm01 (mon.0) 1157 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:36.097909+0000 mon.vm01 (mon.0) 1158 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:36.098514+0000 mon.vm01 (mon.0) 1159 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:36.104647+0000 mon.vm01 (mon.0) 1160 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:36.106259+0000 mon.vm01 (mon.0) 1161 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:32:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:36 vm01 bash[28222]: audit 2026-04-16T19:32:36.669945+0000 mon.vm01 (mon.0) 1162 : audit [DBG] from='client.? 192.168.123.101:0/2212181217' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:37.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: cluster 2026-04-16T19:32:35.382431+0000 mgr.vm01.nwhpas (mgr.14227) 658 : cluster [DBG] pgmap v360: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:37.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:35.728604+0000 mon.vm01 (mon.0) 1156 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:32:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:35.735300+0000 mon.vm01 (mon.0) 1157 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:32:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:36.097909+0000 mon.vm01 (mon.0) 1158 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:32:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:36.098514+0000 mon.vm01 (mon.0) 1159 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:32:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:36.104647+0000 mon.vm01 (mon.0) 1160 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:32:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:36.106259+0000 mon.vm01 (mon.0) 1161 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:32:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:36 vm04 bash[34817]: audit 2026-04-16T19:32:36.669945+0000 mon.vm01 (mon.0) 1162 : audit [DBG] from='client.? 192.168.123.101:0/2212181217' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:37 vm04 bash[34817]: cluster 2026-04-16T19:32:36.099821+0000 mgr.vm01.nwhpas (mgr.14227) 659 : cluster [DBG] pgmap v361: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:37 vm04 bash[34817]: audit 2026-04-16T19:32:36.207763+0000 mgr.vm01.nwhpas (mgr.14227) 660 : audit [DBG] from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:37 vm04 bash[34817]: audit 2026-04-16T19:32:36.416178+0000 mgr.vm01.nwhpas (mgr.14227) 661 : audit [DBG] from='client.15944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:38.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:37 vm04 bash[34817]: audit 2026-04-16T19:32:37.578052+0000 mon.vm01 (mon.0) 1163 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:32:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:37 vm01 bash[28222]: cluster 2026-04-16T19:32:36.099821+0000 mgr.vm01.nwhpas (mgr.14227) 659 : cluster [DBG] pgmap v361: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:37 vm01 bash[28222]: audit 2026-04-16T19:32:36.207763+0000 mgr.vm01.nwhpas (mgr.14227) 660 : audit [DBG] from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:37 vm01 bash[28222]: audit 2026-04-16T19:32:36.416178+0000 mgr.vm01.nwhpas (mgr.14227) 661 : audit [DBG] from='client.15944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:38.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:37 vm01 bash[28222]: audit 2026-04-16T19:32:37.578052+0000 mon.vm01 (mon.0) 1163 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:32:40.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:39 vm04 bash[34817]: cluster 2026-04-16T19:32:38.100201+0000 mgr.vm01.nwhpas (mgr.14227) 662 : cluster [DBG] pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:40.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:39 vm01 bash[28222]: cluster 2026-04-16T19:32:38.100201+0000 mgr.vm01.nwhpas (mgr.14227) 662 : cluster [DBG] pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:41.904 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:32:42.092 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:32:42.092 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:32:42.092 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:32:42.092 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 9m - -
2026-04-16T19:32:42.092 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 3m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:32:42.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:41 vm04 bash[34817]: cluster 2026-04-16T19:32:40.100666+0000 mgr.vm01.nwhpas (mgr.14227) 663 : cluster [DBG] pgmap v363: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:32:42.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:41 vm01 bash[28222]: cluster 2026-04-16T19:32:40.100666+0000 mgr.vm01.nwhpas (mgr.14227) 663 : cluster [DBG] pgmap v363: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:32:42.320 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:32:42.320 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:32:42.320 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:32:43.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:42 vm04 bash[34817]: audit 2026-04-16T19:32:41.881694+0000 mgr.vm01.nwhpas (mgr.14227) 664 : audit [DBG] from='client.15952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:43.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:42 vm04 bash[34817]: audit 2026-04-16T19:32:42.316126+0000 mon.vm01 (mon.0) 1164 : audit [DBG] from='client.? 192.168.123.101:0/638712' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:43.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:42 vm01 bash[28222]: audit 2026-04-16T19:32:41.881694+0000 mgr.vm01.nwhpas (mgr.14227) 664 : audit [DBG] from='client.15952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:43.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:42 vm01 bash[28222]: audit 2026-04-16T19:32:42.316126+0000 mon.vm01 (mon.0) 1164 : audit [DBG] from='client.? 192.168.123.101:0/638712' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:44.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:43 vm04 bash[34817]: audit 2026-04-16T19:32:42.084701+0000 mgr.vm01.nwhpas (mgr.14227) 665 : audit [DBG] from='client.15956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:44.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:43 vm04 bash[34817]: cluster 2026-04-16T19:32:42.101074+0000 mgr.vm01.nwhpas (mgr.14227) 666 : cluster [DBG] pgmap v364: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:32:44.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:43 vm01 bash[28222]: audit 2026-04-16T19:32:42.084701+0000 mgr.vm01.nwhpas (mgr.14227) 665 : audit [DBG] from='client.15956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:44.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:43 vm01 bash[28222]: cluster 2026-04-16T19:32:42.101074+0000 mgr.vm01.nwhpas (mgr.14227) 666 : cluster [DBG] pgmap v364: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:32:46.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:45 vm04 bash[34817]: cluster 2026-04-16T19:32:44.101513+0000 mgr.vm01.nwhpas (mgr.14227) 667 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:46.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:45 vm01 bash[28222]: cluster 2026-04-16T19:32:44.101513+0000 mgr.vm01.nwhpas (mgr.14227) 667 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:47.534 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:32:47.717 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:32:47.717 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:32:47.717 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:32:47.717 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 9m - -
2026-04-16T19:32:47.718 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 3m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:32:47.961 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:32:47.962 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:32:47.962 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:32:48.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:47 vm04 bash[34817]: cluster 2026-04-16T19:32:46.101971+0000 mgr.vm01.nwhpas (mgr.14227) 668 : cluster [DBG] pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:48.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:47 vm01 bash[28222]: cluster 2026-04-16T19:32:46.101971+0000 mgr.vm01.nwhpas (mgr.14227) 668 : cluster [DBG] pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-04-16T19:32:49.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:48 vm04 bash[34817]: audit 2026-04-16T19:32:47.512097+0000 mgr.vm01.nwhpas (mgr.14227) 669 : audit [DBG] from='client.15964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:49.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:48 vm04 bash[34817]: audit 2026-04-16T19:32:47.709915+0000 mgr.vm01.nwhpas (mgr.14227) 670 : audit [DBG] from='client.15968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:49.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:48 vm04 bash[34817]: audit 2026-04-16T19:32:47.957084+0000 mon.vm01 (mon.0) 1165 : audit [DBG] from='client.? 192.168.123.101:0/1355531110' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:49.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:48 vm01 bash[28222]: audit 2026-04-16T19:32:47.512097+0000 mgr.vm01.nwhpas (mgr.14227) 669 : audit [DBG] from='client.15964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:49.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:48 vm01 bash[28222]: audit 2026-04-16T19:32:47.709915+0000 mgr.vm01.nwhpas (mgr.14227) 670 : audit [DBG] from='client.15968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:49.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:48 vm01 bash[28222]: audit 2026-04-16T19:32:47.957084+0000 mon.vm01 (mon.0) 1165 : audit [DBG] from='client.? 192.168.123.101:0/1355531110' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:50.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:49 vm04 bash[34817]: cluster 2026-04-16T19:32:48.102400+0000 mgr.vm01.nwhpas (mgr.14227) 671 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:50.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:49 vm01 bash[28222]: cluster 2026-04-16T19:32:48.102400+0000 mgr.vm01.nwhpas (mgr.14227) 671 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:52.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:51 vm04 bash[34817]: cluster 2026-04-16T19:32:50.102861+0000 mgr.vm01.nwhpas (mgr.14227) 672 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:51 vm01 bash[28222]: cluster 2026-04-16T19:32:50.102861+0000 mgr.vm01.nwhpas (mgr.14227) 672 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:53.169 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:32:53.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:52 vm04 bash[34817]: audit 2026-04-16T19:32:52.578025+0000 mon.vm01 (mon.0) 1166 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:32:53.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:52 vm01 bash[28222]: audit 2026-04-16T19:32:52.578025+0000 mon.vm01 (mon.0) 1166 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:32:53.356 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:32:53.356 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (8m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:32:53.356 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:32:53.356 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 9m - -
2026-04-16T19:32:53.356 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 3m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:32:53.590 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:32:53.590 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:32:53.590 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:32:54.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:53 vm04 bash[34817]: cluster 2026-04-16T19:32:52.103276+0000 mgr.vm01.nwhpas (mgr.14227) 673 : cluster [DBG] pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:54.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:53 vm04 bash[34817]: audit 2026-04-16T19:32:53.585833+0000 mon.vm01 (mon.0) 1167 : audit [DBG] from='client.? 192.168.123.101:0/1717364718' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:54.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:53 vm01 bash[28222]: cluster 2026-04-16T19:32:52.103276+0000 mgr.vm01.nwhpas (mgr.14227) 673 : cluster [DBG] pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:54.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:53 vm01 bash[28222]: audit 2026-04-16T19:32:53.585833+0000 mon.vm01 (mon.0) 1167 : audit [DBG] from='client.? 192.168.123.101:0/1717364718' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:32:55.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:54 vm04 bash[34817]: audit 2026-04-16T19:32:53.147518+0000 mgr.vm01.nwhpas (mgr.14227) 674 : audit [DBG] from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:55.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:54 vm04 bash[34817]: audit 2026-04-16T19:32:53.348889+0000 mgr.vm01.nwhpas (mgr.14227) 675 : audit [DBG] from='client.15980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:54 vm01 bash[28222]: audit 2026-04-16T19:32:53.147518+0000 mgr.vm01.nwhpas (mgr.14227) 674 : audit [DBG] from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:55.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:54 vm01 bash[28222]: audit 2026-04-16T19:32:53.348889+0000 mgr.vm01.nwhpas (mgr.14227) 675 : audit [DBG] from='client.15980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:32:56.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:55 vm04 bash[34817]: cluster 2026-04-16T19:32:54.103721+0000 mgr.vm01.nwhpas (mgr.14227) 676 : cluster [DBG] pgmap v370: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:56.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:55 vm01 bash[28222]: cluster 2026-04-16T19:32:54.103721+0000 mgr.vm01.nwhpas (mgr.14227) 676 : cluster [DBG] pgmap v370: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:32:58.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:57 vm04 bash[34817]: cluster 2026-04-16T19:32:56.104128+0000 mgr.vm01.nwhpas (mgr.14227) 677 : cluster [DBG] pgmap v371: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:58.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:57 vm01 bash[28222]: cluster 2026-04-16T19:32:56.104128+0000 mgr.vm01.nwhpas (mgr.14227) 677 : cluster [DBG] pgmap v371: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:32:58.793 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:32:58.973 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:32:58.973 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 3m ago 9m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:32:58.973 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:32:58.973 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 9m - -
2026-04-16T19:32:58.973 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (9m) 3m ago 9m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:32:59.201 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:32:59.201 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:32:59.201 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:00.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:59 vm04 bash[34817]: cluster 2026-04-16T19:32:58.104497+0000 mgr.vm01.nwhpas (mgr.14227) 678 : cluster [DBG] pgmap v372: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:33:00.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:59 vm04 bash[34817]: audit 2026-04-16T19:32:58.770991+0000 mgr.vm01.nwhpas (mgr.14227) 679 : audit [DBG] from='client.15988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:00.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:32:59 vm04 bash[34817]: audit 2026-04-16T19:32:59.197005+0000 mon.vm01 (mon.0) 1168 : audit [DBG] from='client.? 192.168.123.101:0/1458850934' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:00.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:59 vm01 bash[28222]: cluster 2026-04-16T19:32:58.104497+0000 mgr.vm01.nwhpas (mgr.14227) 678 : cluster [DBG] pgmap v372: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:33:00.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:59 vm01 bash[28222]: audit 2026-04-16T19:32:58.770991+0000 mgr.vm01.nwhpas (mgr.14227) 679 : audit [DBG] from='client.15988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:00.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:32:59 vm01 bash[28222]: audit 2026-04-16T19:32:59.197005+0000 mon.vm01 (mon.0) 1168 : audit [DBG] from='client.? 192.168.123.101:0/1458850934' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:01.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:00 vm04 bash[34817]: audit 2026-04-16T19:32:58.965722+0000 mgr.vm01.nwhpas (mgr.14227) 680 : audit [DBG] from='client.15992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:01.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:00 vm01 bash[28222]: audit 2026-04-16T19:32:58.965722+0000 mgr.vm01.nwhpas (mgr.14227) 680 : audit [DBG] from='client.15992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:02.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:01 vm04 bash[34817]: cluster 2026-04-16T19:33:00.104904+0000 mgr.vm01.nwhpas (mgr.14227) 681 : cluster [DBG] pgmap v373: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:02.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:01 vm01 bash[28222]: cluster 2026-04-16T19:33:00.104904+0000 mgr.vm01.nwhpas (mgr.14227) 681 : cluster [DBG] pgmap v373: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:04.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:03 vm04 bash[34817]: cluster 2026-04-16T19:33:02.105229+0000 mgr.vm01.nwhpas (mgr.14227) 682 : cluster [DBG] pgmap v374: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:04.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:03 vm01 bash[28222]: cluster 2026-04-16T19:33:02.105229+0000 mgr.vm01.nwhpas (mgr.14227) 682 : cluster [DBG] pgmap v374: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:04.409 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:04.597 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:04.597 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 3m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:04.597 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 9m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:04.597 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 10m - -
2026-04-16T19:33:04.597 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 3m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:04.823 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:04.823 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:04.823 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:05.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:04 vm04 bash[34817]: audit 2026-04-16T19:33:04.818488+0000 mon.vm01 (mon.0) 1169 : audit [DBG] from='client.? 192.168.123.101:0/3035804550' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:05.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:04 vm01 bash[28222]: audit 2026-04-16T19:33:04.818488+0000 mon.vm01 (mon.0) 1169 : audit [DBG] from='client.? 192.168.123.101:0/3035804550' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:06.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:05 vm04 bash[34817]: cluster 2026-04-16T19:33:04.105690+0000 mgr.vm01.nwhpas (mgr.14227) 683 : cluster [DBG] pgmap v375: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:06.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:05 vm04 bash[34817]: audit 2026-04-16T19:33:04.386062+0000 mgr.vm01.nwhpas (mgr.14227) 684 : audit [DBG] from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:06.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:05 vm04 bash[34817]: audit 2026-04-16T19:33:04.590125+0000 mgr.vm01.nwhpas (mgr.14227) 685 : audit [DBG] from='client.16004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:06.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:05 vm01 bash[28222]: cluster 2026-04-16T19:33:04.105690+0000 mgr.vm01.nwhpas (mgr.14227) 683 : cluster [DBG] pgmap v375: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:06.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:05 vm01 bash[28222]: audit 2026-04-16T19:33:04.386062+0000 mgr.vm01.nwhpas (mgr.14227) 684 : audit [DBG] from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:06.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:05 vm01 bash[28222]: audit 2026-04-16T19:33:04.590125+0000 mgr.vm01.nwhpas (mgr.14227) 685 : audit [DBG] from='client.16004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:08.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:07 vm04 bash[34817]: cluster 2026-04-16T19:33:06.106047+0000 mgr.vm01.nwhpas (mgr.14227) 686 : cluster [DBG] pgmap v376: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:08.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:07 vm04 bash[34817]: audit 2026-04-16T19:33:07.578231+0000 mon.vm01 (mon.0) 1170 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:33:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:07 vm01 bash[28222]: cluster 2026-04-16T19:33:06.106047+0000 mgr.vm01.nwhpas (mgr.14227) 686 : cluster [DBG] pgmap v376: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:08.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:07 vm01 bash[28222]: audit 2026-04-16T19:33:07.578231+0000 mon.vm01 (mon.0) 1170 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:33:10.036 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:10.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:09 vm04 bash[34817]: cluster 2026-04-16T19:33:08.106479+0000 mgr.vm01.nwhpas (mgr.14227) 687 : cluster [DBG] pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:08.106479+0000 mgr.vm01.nwhpas (mgr.14227) 687 : cluster [DBG] pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:33:10.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:09 vm01 bash[28222]: cluster 2026-04-16T19:33:08.106479+0000 mgr.vm01.nwhpas (mgr.14227) 687 : cluster [DBG] pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:33:10.223 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:33:10.223 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 3m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:33:10.223 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (3m) 3m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:33:10.223 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 10m - - 2026-04-16T19:33:10.223 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 3m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:33:10.456 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:33:10.456 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:33:10.456 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:33:11.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:10 vm04 bash[34817]: audit 2026-04-16T19:33:10.450222+0000 mon.vm01 (mon.0) 1171 : audit [DBG] from='client.? 192.168.123.101:0/1131462919' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:11.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:10 vm04 bash[34817]: audit 2026-04-16T19:33:10.450222+0000 mon.vm01 (mon.0) 1171 : audit [DBG] from='client.? 192.168.123.101:0/1131462919' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:11.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:10 vm01 bash[28222]: audit 2026-04-16T19:33:10.450222+0000 mon.vm01 (mon.0) 1171 : audit [DBG] from='client.? 192.168.123.101:0/1131462919' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:11.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:10 vm01 bash[28222]: audit 2026-04-16T19:33:10.450222+0000 mon.vm01 (mon.0) 1171 : audit [DBG] from='client.? 
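The "Waiting for rgw.foo.vm04.rpimxa to stop" iterations above repeat on a roughly five-second cadence: each poll re-runs `ceph orch ps` and `ceph health detail`, and the daemon never leaves the error state shown in the table. The same wait can be expressed against structured output instead of the plain-text table; the following is a minimal sketch only, assuming jq is available and that the records emitted by `ceph orch ps --format json` carry daemon_type, daemon_id and status_desc fields (none of which is shown in this log):

    #!/usr/bin/env bash
    # Sketch: poll until one daemon reports "stopped", reading JSON instead of
    # grepping the table printed above. jq and the field names are assumptions.
    daemon=rgw.foo.vm04.rpimxa
    deadline=$((SECONDS + 300))   # illustrative 300 s budget
    while true; do
      state=$(ceph orch ps --daemon-type rgw --format json |
        jq -r --arg d "$daemon" '.[] | select(.daemon_type + "." + .daemon_id == $d) | .status_desc')
      [ "$state" = stopped ] && break
      if [ "$SECONDS" -ge "$deadline" ]; then
        echo "timed out waiting for $daemon (last state: $state)"
        exit 1
      fi
      echo "Waiting for $daemon to stop (state: $state)"
      sleep 5                     # same cadence visible in the timestamps above
    done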
2026-04-16T19:33:12.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:11 vm04 bash[34817]: audit 2026-04-16T19:33:10.015146+0000 mgr.vm01.nwhpas (mgr.14227) 688 : audit [DBG] from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:12.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:11 vm04 bash[34817]: cluster 2026-04-16T19:33:10.106948+0000 mgr.vm01.nwhpas (mgr.14227) 689 : cluster [DBG] pgmap v378: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:33:12.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:11 vm04 bash[34817]: audit 2026-04-16T19:33:10.216078+0000 mgr.vm01.nwhpas (mgr.14227) 690 : audit [DBG] from='client.16016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:14.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:14 vm04 bash[34817]: cluster 2026-04-16T19:33:12.107353+0000 mgr.vm01.nwhpas (mgr.14227) 691 : cluster [DBG] pgmap v379: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:33:15.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:15 vm04 bash[34817]: cluster 2026-04-16T19:33:14.107831+0000 mgr.vm01.nwhpas (mgr.14227) 692 : cluster [DBG] pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 170 B/s wr, 36 op/s
2026-04-16T19:33:15.706 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:15.908 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:15.908 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 3m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:15.908 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 3m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:15.908 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 10m - -
2026-04-16T19:33:15.908 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 3m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:16.178 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:16.178 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:16.178 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:16.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:16 vm04 bash[34817]: audit 2026-04-16T19:33:15.678454+0000 mgr.vm01.nwhpas (mgr.14227) 693 : audit [DBG] from='client.16024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:16.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:16 vm04 bash[34817]: audit 2026-04-16T19:33:15.900517+0000 mgr.vm01.nwhpas (mgr.14227) 694 : audit [DBG] from='client.16028 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:17.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:17 vm04 bash[34817]: cluster 2026-04-16T19:33:16.108384+0000 mgr.vm01.nwhpas (mgr.14227) 695 : cluster [DBG] pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-16T19:33:17.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:17 vm04 bash[34817]: audit 2026-04-16T19:33:16.173698+0000 mon.vm01 (mon.0) 1172 : audit [DBG] from='client.? 192.168.123.101:0/662437484' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:19.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:19 vm04 bash[34817]: cluster 2026-04-16T19:33:18.108798+0000 mgr.vm01.nwhpas (mgr.14227) 696 : cluster [DBG] pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:33:21.412 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:21.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:21 vm04 bash[34817]: cluster 2026-04-16T19:33:20.109304+0000 mgr.vm01.nwhpas (mgr.14227) 697 : cluster [DBG] pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:33:21.612 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:21.612 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:21.612 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:21.612 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 10m - -
2026-04-16T19:33:21.612 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 3m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:21.861 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:21.861 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:21.861 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:22.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:22 vm04 bash[34817]: audit 2026-04-16T19:33:21.389803+0000 mgr.vm01.nwhpas (mgr.14227) 698 : audit [DBG] from='client.16036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:22.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:22 vm04 bash[34817]: audit 2026-04-16T19:33:21.604343+0000 mgr.vm01.nwhpas (mgr.14227) 699 : audit [DBG] from='client.16040 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:22.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:22 vm04 bash[34817]: audit 2026-04-16T19:33:21.856074+0000 mon.vm01 (mon.0) 1173 : audit [DBG] from='client.? 192.168.123.101:0/1016498619' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:23.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:23 vm04 bash[34817]: cluster 2026-04-16T19:33:22.109873+0000 mgr.vm01.nwhpas (mgr.14227) 700 : cluster [DBG] pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:33:23.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:23 vm04 bash[34817]: audit 2026-04-16T19:33:22.578725+0000 mon.vm01 (mon.0) 1174 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:33:25.460 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:25 vm04 bash[34817]: cluster 2026-04-16T19:33:24.110431+0000 mgr.vm01.nwhpas (mgr.14227) 701 : cluster [DBG] pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:33:27.095 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:27.309 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:27.309 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:27.309 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:27.309 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 10m - -
2026-04-16T19:33:27.309 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 3m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:27.564 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:27.564 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:27.564 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:27 vm04 bash[34817]: cluster 2026-04-16T19:33:26.111043+0000 mgr.vm01.nwhpas (mgr.14227) 702 : cluster [DBG] pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-16T19:33:28.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:28 vm04 bash[34817]: audit 2026-04-16T19:33:27.072955+0000 mgr.vm01.nwhpas (mgr.14227) 703 : audit [DBG] from='client.16048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:28.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:28 vm04 bash[34817]: audit 2026-04-16T19:33:27.301367+0000 mgr.vm01.nwhpas (mgr.14227) 704 : audit [DBG] from='client.16052 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:28.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:28 vm04 bash[34817]: audit 2026-04-16T19:33:27.559297+0000 mon.vm01 (mon.0) 1175 : audit [DBG] from='client.? 192.168.123.101:0/1752581563' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:29.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:29 vm04 bash[34817]: cluster 2026-04-16T19:33:28.111545+0000 mgr.vm01.nwhpas (mgr.14227) 705 : cluster [DBG] pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:33:31.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:31 vm04 bash[34817]: cluster 2026-04-16T19:33:30.111968+0000 mgr.vm01.nwhpas (mgr.14227) 706 : cluster [DBG] pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:32.811 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:33.022 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:33.023 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:33.023 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:33.023 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 3m ago 10m - -
2026-04-16T19:33:33.023 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 3m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:33.316 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:33.316 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:33.316 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:33.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:33 vm04 bash[34817]: cluster 2026-04-16T19:33:32.112387+0000 mgr.vm01.nwhpas (mgr.14227) 707 : cluster [DBG] pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:33.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:33 vm04 bash[34817]: audit 2026-04-16T19:33:32.785905+0000 mgr.vm01.nwhpas (mgr.14227) 708 : audit [DBG] from='client.16060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:34.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:34 vm04 bash[34817]: audit 2026-04-16T19:33:33.014165+0000 mgr.vm01.nwhpas (mgr.14227) 709 : audit [DBG] from='client.16064 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:34.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:34 vm04 bash[34817]: audit 2026-04-16T19:33:33.311187+0000 mon.vm01 (mon.0) 1176 : audit [DBG] from='client.? 192.168.123.101:0/2635218899' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
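Each poll also runs `ceph health detail`, whose HEALTH_WARN / CEPHADM_FAILED_DAEMON block recurs above on every iteration. A specific health code can be tested programmatically as well; this is a sketch only, assuming jq and the usual .checks / .summary.message layout of `ceph health detail --format json`, which this log does not show:

    #!/usr/bin/env bash
    # Sketch: probe for one health check code instead of grepping the
    # HEALTH_WARN text. The JSON layout is an assumption, not taken from this log.
    if ceph health detail --format json | jq -e '.checks.CEPHADM_FAILED_DAEMON' >/dev/null; then
      echo "cephadm reports failed daemon(s):"
      ceph health detail --format json | jq -r '.checks.CEPHADM_FAILED_DAEMON.summary.message'
    fi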
2026-04-16T19:33:35.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:35 vm04 bash[34817]: cluster 2026-04-16T19:33:34.112831+0000 mgr.vm01.nwhpas (mgr.14227) 710 : cluster [DBG] pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:36.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:36 vm04 bash[34817]: audit 2026-04-16T19:33:36.122112+0000 mon.vm01 (mon.0) 1177 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: cluster 2026-04-16T19:33:36.113353+0000 mgr.vm01.nwhpas (mgr.14227) 711 : cluster [DBG] pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: audit 2026-04-16T19:33:36.496602+0000 mon.vm01 (mon.0) 1178 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"} : dispatch
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: audit 2026-04-16T19:33:36.496872+0000 mon.vm01 (mon.0) 1179 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: audit 2026-04-16T19:33:36.497445+0000 mon.vm01 (mon.0) 1180 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: audit 2026-04-16T19:33:36.497894+0000 mon.vm01 (mon.0) 1181 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: cluster 2026-04-16T19:33:36.499110+0000 mgr.vm01.nwhpas (mgr.14227) 712 : cluster [DBG] pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: audit 2026-04-16T19:33:36.503766+0000 mon.vm01 (mon.0) 1182 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:33:37.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:37 vm04 bash[34817]: audit 2026-04-16T19:33:36.505806+0000 mon.vm01 (mon.0) 1183 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:33:38.554 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:38.710 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:38 vm04 bash[34817]: audit 2026-04-16T19:33:37.578904+0000 mon.vm01 (mon.0) 1184 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:33:38.747 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:38.748 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:38.748 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:38.748 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 4m ago 10m - -
2026-04-16T19:33:38.748 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 4m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:39.005 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:39.005 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:39.005 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:39.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:39 vm04 bash[34817]: cluster 2026-04-16T19:33:38.499577+0000 mgr.vm01.nwhpas (mgr.14227) 713 : cluster [DBG] pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:33:39.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:39 vm04 bash[34817]: audit 2026-04-16T19:33:38.531471+0000 mgr.vm01.nwhpas (mgr.14227) 714 : audit [DBG] from='client.16072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:39.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:39 vm04 bash[34817]: audit 2026-04-16T19:33:38.739912+0000 mgr.vm01.nwhpas (mgr.14227) 715 : audit [DBG] from='client.16076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:39.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:39 vm04 bash[34817]: audit 2026-04-16T19:33:38.999887+0000 mon.vm04 (mon.1) 38 : audit [DBG] from='client.? 192.168.123.101:0/1007920340' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:41.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:41 vm04 bash[34817]: cluster 2026-04-16T19:33:40.500013+0000 mgr.vm01.nwhpas (mgr.14227) 716 : cluster [DBG] pgmap v394: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:33:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:43 vm04 bash[34817]: cluster 2026-04-16T19:33:42.500504+0000 mgr.vm01.nwhpas (mgr.14227) 717 : cluster [DBG] pgmap v395: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:33:44.227 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:33:44.417 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:33:44.417 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:33:44.417 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:33:44.417 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 4m ago 10m - -
2026-04-16T19:33:44.417 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 4m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:33:44.663 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:33:44.664 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:33:44.664 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:33:44.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:44 vm04 bash[34817]: audit 2026-04-16T19:33:44.658727+0000 mon.vm01 (mon.0) 1185 : audit [DBG] from='client.? 192.168.123.101:0/1950368615' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:33:45.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:45 vm04 bash[34817]: audit 2026-04-16T19:33:44.205445+0000 mgr.vm01.nwhpas (mgr.14227) 718 : audit [DBG] from='client.16082 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:45.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:45 vm04 bash[34817]: audit 2026-04-16T19:33:44.410012+0000 mgr.vm01.nwhpas (mgr.14227) 719 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:33:45.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:45 vm04 bash[34817]: cluster 2026-04-16T19:33:44.501070+0000 mgr.vm01.nwhpas (mgr.14227) 720 : cluster [DBG] pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-16T19:33:45.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:45 vm01 bash[28222]: audit 2026-04-16T19:33:44.205445+0000 mgr.vm01.nwhpas (mgr.14227) 718 : audit [DBG]
from='client.16082 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:45.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:45 vm01 bash[28222]: audit 2026-04-16T19:33:44.410012+0000 mgr.vm01.nwhpas (mgr.14227) 719 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:45.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:45 vm01 bash[28222]: audit 2026-04-16T19:33:44.410012+0000 mgr.vm01.nwhpas (mgr.14227) 719 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:45.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:45 vm01 bash[28222]: cluster 2026-04-16T19:33:44.501070+0000 mgr.vm01.nwhpas (mgr.14227) 720 : cluster [DBG] pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-16T19:33:45.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:45 vm01 bash[28222]: cluster 2026-04-16T19:33:44.501070+0000 mgr.vm01.nwhpas (mgr.14227) 720 : cluster [DBG] pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-16T19:33:48.210 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:47 vm04 bash[34817]: cluster 2026-04-16T19:33:46.501566+0000 mgr.vm01.nwhpas (mgr.14227) 721 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-16T19:33:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:47 vm04 bash[34817]: cluster 2026-04-16T19:33:46.501566+0000 mgr.vm01.nwhpas (mgr.14227) 721 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-16T19:33:48.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:47 vm01 bash[28222]: cluster 2026-04-16T19:33:46.501566+0000 mgr.vm01.nwhpas (mgr.14227) 721 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-16T19:33:48.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:47 vm01 bash[28222]: cluster 2026-04-16T19:33:46.501566+0000 mgr.vm01.nwhpas (mgr.14227) 721 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-16T19:33:49.891 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:33:50.109 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:33:50.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (9m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:33:50.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:33:50.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 4m ago 10m - - 2026-04-16T19:33:50.109 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 4m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:33:50.210 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:49 vm04 bash[34817]: cluster 2026-04-16T19:33:48.501890+0000 mgr.vm01.nwhpas (mgr.14227) 722 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:49 vm04 bash[34817]: cluster 2026-04-16T19:33:48.501890+0000 mgr.vm01.nwhpas (mgr.14227) 722 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:50.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:49 vm01 bash[28222]: cluster 2026-04-16T19:33:48.501890+0000 mgr.vm01.nwhpas (mgr.14227) 722 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:50.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:49 vm01 bash[28222]: cluster 2026-04-16T19:33:48.501890+0000 mgr.vm01.nwhpas (mgr.14227) 722 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:50.355 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:33:50.355 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:33:50.355 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:33:51.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:50 vm04 bash[34817]: audit 2026-04-16T19:33:49.866937+0000 mgr.vm01.nwhpas (mgr.14227) 723 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:51.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:50 vm04 bash[34817]: audit 2026-04-16T19:33:49.866937+0000 mgr.vm01.nwhpas (mgr.14227) 723 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:51.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:50 vm04 bash[34817]: audit 2026-04-16T19:33:50.350632+0000 mon.vm01 (mon.0) 1186 : audit [DBG] from='client.? 192.168.123.101:0/373466002' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:51.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:50 vm04 bash[34817]: audit 2026-04-16T19:33:50.350632+0000 mon.vm01 (mon.0) 1186 : audit [DBG] from='client.? 192.168.123.101:0/373466002' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:51.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:50 vm01 bash[28222]: audit 2026-04-16T19:33:49.866937+0000 mgr.vm01.nwhpas (mgr.14227) 723 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:51.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:50 vm01 bash[28222]: audit 2026-04-16T19:33:49.866937+0000 mgr.vm01.nwhpas (mgr.14227) 723 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:51.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:50 vm01 bash[28222]: audit 2026-04-16T19:33:50.350632+0000 mon.vm01 (mon.0) 1186 : audit [DBG] from='client.? 
192.168.123.101:0/373466002' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:51.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:50 vm01 bash[28222]: audit 2026-04-16T19:33:50.350632+0000 mon.vm01 (mon.0) 1186 : audit [DBG] from='client.? 192.168.123.101:0/373466002' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:51 vm04 bash[34817]: audit 2026-04-16T19:33:50.101458+0000 mgr.vm01.nwhpas (mgr.14227) 724 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:51 vm04 bash[34817]: audit 2026-04-16T19:33:50.101458+0000 mgr.vm01.nwhpas (mgr.14227) 724 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:51 vm04 bash[34817]: cluster 2026-04-16T19:33:50.502391+0000 mgr.vm01.nwhpas (mgr.14227) 725 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:51 vm04 bash[34817]: cluster 2026-04-16T19:33:50.502391+0000 mgr.vm01.nwhpas (mgr.14227) 725 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:51 vm01 bash[28222]: audit 2026-04-16T19:33:50.101458+0000 mgr.vm01.nwhpas (mgr.14227) 724 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:51 vm01 bash[28222]: audit 2026-04-16T19:33:50.101458+0000 mgr.vm01.nwhpas (mgr.14227) 724 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:51 vm01 bash[28222]: cluster 2026-04-16T19:33:50.502391+0000 mgr.vm01.nwhpas (mgr.14227) 725 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:51 vm01 bash[28222]: cluster 2026-04-16T19:33:50.502391+0000 mgr.vm01.nwhpas (mgr.14227) 725 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:52 vm04 bash[34817]: audit 2026-04-16T19:33:52.579095+0000 mon.vm01 (mon.0) 1187 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:33:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:52 vm04 bash[34817]: audit 2026-04-16T19:33:52.579095+0000 mon.vm01 (mon.0) 1187 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:33:53.212 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:52 vm01 bash[28222]: audit 2026-04-16T19:33:52.579095+0000 mon.vm01 (mon.0) 1187 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:33:53.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:52 vm01 bash[28222]: audit 2026-04-16T19:33:52.579095+0000 mon.vm01 (mon.0) 1187 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:33:54.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:53 vm04 bash[34817]: cluster 2026-04-16T19:33:52.502809+0000 mgr.vm01.nwhpas (mgr.14227) 726 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:54.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:53 vm04 bash[34817]: cluster 2026-04-16T19:33:52.502809+0000 mgr.vm01.nwhpas (mgr.14227) 726 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:54.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:53 vm01 bash[28222]: cluster 2026-04-16T19:33:52.502809+0000 mgr.vm01.nwhpas (mgr.14227) 726 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:54.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:53 vm01 bash[28222]: cluster 2026-04-16T19:33:52.502809+0000 mgr.vm01.nwhpas (mgr.14227) 726 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:55.577 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:33:55.763 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:33:55.763 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:33:55.763 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:33:55.763 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 4m ago 10m - - 2026-04-16T19:33:55.763 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 4m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:33:56.026 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:33:56.026 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:33:56.026 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:33:56.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:55 vm04 bash[34817]: cluster 2026-04-16T19:33:54.503249+0000 mgr.vm01.nwhpas (mgr.14227) 727 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:56.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:55 vm04 bash[34817]: cluster 2026-04-16T19:33:54.503249+0000 
mgr.vm01.nwhpas (mgr.14227) 727 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:56.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:55 vm01 bash[28222]: cluster 2026-04-16T19:33:54.503249+0000 mgr.vm01.nwhpas (mgr.14227) 727 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:56.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:55 vm01 bash[28222]: cluster 2026-04-16T19:33:54.503249+0000 mgr.vm01.nwhpas (mgr.14227) 727 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:33:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:56 vm04 bash[34817]: audit 2026-04-16T19:33:55.552950+0000 mgr.vm01.nwhpas (mgr.14227) 728 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:56 vm04 bash[34817]: audit 2026-04-16T19:33:55.552950+0000 mgr.vm01.nwhpas (mgr.14227) 728 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:56 vm04 bash[34817]: audit 2026-04-16T19:33:55.755860+0000 mgr.vm01.nwhpas (mgr.14227) 729 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:56 vm04 bash[34817]: audit 2026-04-16T19:33:55.755860+0000 mgr.vm01.nwhpas (mgr.14227) 729 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:56 vm04 bash[34817]: audit 2026-04-16T19:33:56.021193+0000 mon.vm01 (mon.0) 1188 : audit [DBG] from='client.? 192.168.123.101:0/1971740065' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:56 vm04 bash[34817]: audit 2026-04-16T19:33:56.021193+0000 mon.vm01 (mon.0) 1188 : audit [DBG] from='client.? 
192.168.123.101:0/1971740065' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:56 vm01 bash[28222]: audit 2026-04-16T19:33:55.552950+0000 mgr.vm01.nwhpas (mgr.14227) 728 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:56 vm01 bash[28222]: audit 2026-04-16T19:33:55.552950+0000 mgr.vm01.nwhpas (mgr.14227) 728 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:56 vm01 bash[28222]: audit 2026-04-16T19:33:55.755860+0000 mgr.vm01.nwhpas (mgr.14227) 729 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:56 vm01 bash[28222]: audit 2026-04-16T19:33:55.755860+0000 mgr.vm01.nwhpas (mgr.14227) 729 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:33:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:56 vm01 bash[28222]: audit 2026-04-16T19:33:56.021193+0000 mon.vm01 (mon.0) 1188 : audit [DBG] from='client.? 192.168.123.101:0/1971740065' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:57.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:56 vm01 bash[28222]: audit 2026-04-16T19:33:56.021193+0000 mon.vm01 (mon.0) 1188 : audit [DBG] from='client.? 
192.168.123.101:0/1971740065' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:33:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:57 vm04 bash[34817]: cluster 2026-04-16T19:33:56.503660+0000 mgr.vm01.nwhpas (mgr.14227) 730 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:33:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:57 vm04 bash[34817]: cluster 2026-04-16T19:33:56.503660+0000 mgr.vm01.nwhpas (mgr.14227) 730 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:33:58.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:57 vm01 bash[28222]: cluster 2026-04-16T19:33:56.503660+0000 mgr.vm01.nwhpas (mgr.14227) 730 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:33:58.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:57 vm01 bash[28222]: cluster 2026-04-16T19:33:56.503660+0000 mgr.vm01.nwhpas (mgr.14227) 730 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:33:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:58 vm04 bash[34817]: cluster 2026-04-16T19:33:58.504109+0000 mgr.vm01.nwhpas (mgr.14227) 731 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 341 B/s wr, 17 op/s 2026-04-16T19:33:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:33:58 vm04 bash[34817]: cluster 2026-04-16T19:33:58.504109+0000 mgr.vm01.nwhpas (mgr.14227) 731 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 341 B/s wr, 17 op/s 2026-04-16T19:33:59.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:58 vm01 bash[28222]: cluster 2026-04-16T19:33:58.504109+0000 mgr.vm01.nwhpas (mgr.14227) 731 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 341 B/s wr, 17 op/s 2026-04-16T19:33:59.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:33:58 vm01 bash[28222]: cluster 2026-04-16T19:33:58.504109+0000 mgr.vm01.nwhpas (mgr.14227) 731 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 341 B/s wr, 17 op/s 2026-04-16T19:34:01.242 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:34:01.431 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:34:01.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 4m ago 10m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:34:01.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 10m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:34:01.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 4m ago 10m - - 2026-04-16T19:34:01.431 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (10m) 4m ago 10m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:34:01.666 
INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:34:01.666 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:34:01.666 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:34:01.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:01 vm04 bash[34817]: cluster 2026-04-16T19:34:00.504517+0000 mgr.vm01.nwhpas (mgr.14227) 732 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 42 op/s 2026-04-16T19:34:01.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:01 vm04 bash[34817]: cluster 2026-04-16T19:34:00.504517+0000 mgr.vm01.nwhpas (mgr.14227) 732 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 42 op/s 2026-04-16T19:34:01.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:01 vm01 bash[28222]: cluster 2026-04-16T19:34:00.504517+0000 mgr.vm01.nwhpas (mgr.14227) 732 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 42 op/s 2026-04-16T19:34:01.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:01 vm01 bash[28222]: cluster 2026-04-16T19:34:00.504517+0000 mgr.vm01.nwhpas (mgr.14227) 732 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 42 op/s 2026-04-16T19:34:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:02 vm04 bash[34817]: audit 2026-04-16T19:34:01.218673+0000 mgr.vm01.nwhpas (mgr.14227) 733 : audit [DBG] from='client.16118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:02 vm04 bash[34817]: audit 2026-04-16T19:34:01.218673+0000 mgr.vm01.nwhpas (mgr.14227) 733 : audit [DBG] from='client.16118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:02 vm04 bash[34817]: audit 2026-04-16T19:34:01.423335+0000 mgr.vm01.nwhpas (mgr.14227) 734 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:02 vm04 bash[34817]: audit 2026-04-16T19:34:01.423335+0000 mgr.vm01.nwhpas (mgr.14227) 734 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:02 vm04 bash[34817]: audit 2026-04-16T19:34:01.661477+0000 mon.vm01 (mon.0) 1189 : audit [DBG] from='client.? 192.168.123.101:0/2688611695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:02 vm04 bash[34817]: audit 2026-04-16T19:34:01.661477+0000 mon.vm01 (mon.0) 1189 : audit [DBG] from='client.? 
192.168.123.101:0/2688611695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:02 vm01 bash[28222]: audit 2026-04-16T19:34:01.218673+0000 mgr.vm01.nwhpas (mgr.14227) 733 : audit [DBG] from='client.16118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:02 vm01 bash[28222]: audit 2026-04-16T19:34:01.218673+0000 mgr.vm01.nwhpas (mgr.14227) 733 : audit [DBG] from='client.16118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:02 vm01 bash[28222]: audit 2026-04-16T19:34:01.423335+0000 mgr.vm01.nwhpas (mgr.14227) 734 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:02 vm01 bash[28222]: audit 2026-04-16T19:34:01.423335+0000 mgr.vm01.nwhpas (mgr.14227) 734 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:02 vm01 bash[28222]: audit 2026-04-16T19:34:01.661477+0000 mon.vm01 (mon.0) 1189 : audit [DBG] from='client.? 192.168.123.101:0/2688611695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:02.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:02 vm01 bash[28222]: audit 2026-04-16T19:34:01.661477+0000 mon.vm01 (mon.0) 1189 : audit [DBG] from='client.? 
192.168.123.101:0/2688611695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:03.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:03 vm04 bash[34817]: cluster 2026-04-16T19:34:02.504886+0000 mgr.vm01.nwhpas (mgr.14227) 735 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:03.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:03 vm04 bash[34817]: cluster 2026-04-16T19:34:02.504886+0000 mgr.vm01.nwhpas (mgr.14227) 735 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:03.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:03 vm01 bash[28222]: cluster 2026-04-16T19:34:02.504886+0000 mgr.vm01.nwhpas (mgr.14227) 735 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:03.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:03 vm01 bash[28222]: cluster 2026-04-16T19:34:02.504886+0000 mgr.vm01.nwhpas (mgr.14227) 735 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:05.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:05 vm04 bash[34817]: cluster 2026-04-16T19:34:04.505447+0000 mgr.vm01.nwhpas (mgr.14227) 736 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:05.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:05 vm04 bash[34817]: cluster 2026-04-16T19:34:04.505447+0000 mgr.vm01.nwhpas (mgr.14227) 736 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:05.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:05 vm01 bash[28222]: cluster 2026-04-16T19:34:04.505447+0000 mgr.vm01.nwhpas (mgr.14227) 736 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:05.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:05 vm01 bash[28222]: cluster 2026-04-16T19:34:04.505447+0000 mgr.vm01.nwhpas (mgr.14227) 736 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:06.881 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop 2026-04-16T19:34:07.081 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:34:07.081 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 4m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:34:07.081 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (4m) 4m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:34:07.081 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 error 4m ago 11m - - 2026-04-16T19:34:07.081 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (11m) 4m ago 11m 121M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:34:07.362 
INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:34:07.362 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:34:07.362 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: cluster 2026-04-16T19:34:06.505860+0000 mgr.vm01.nwhpas (mgr.14227) 737 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: cluster 2026-04-16T19:34:06.505860+0000 mgr.vm01.nwhpas (mgr.14227) 737 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: audit 2026-04-16T19:34:06.855535+0000 mgr.vm01.nwhpas (mgr.14227) 738 : audit [DBG] from='client.16130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: audit 2026-04-16T19:34:06.855535+0000 mgr.vm01.nwhpas (mgr.14227) 738 : audit [DBG] from='client.16130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: audit 2026-04-16T19:34:07.356985+0000 mon.vm01 (mon.0) 1190 : audit [DBG] from='client.? 192.168.123.101:0/2333466430' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: audit 2026-04-16T19:34:07.356985+0000 mon.vm01 (mon.0) 1190 : audit [DBG] from='client.? 
192.168.123.101:0/2333466430' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: audit 2026-04-16T19:34:07.579329+0000 mon.vm01 (mon.0) 1191 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:07 vm04 bash[34817]: audit 2026-04-16T19:34:07.579329+0000 mon.vm01 (mon.0) 1191 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: cluster 2026-04-16T19:34:06.505860+0000 mgr.vm01.nwhpas (mgr.14227) 737 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: cluster 2026-04-16T19:34:06.505860+0000 mgr.vm01.nwhpas (mgr.14227) 737 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: audit 2026-04-16T19:34:06.855535+0000 mgr.vm01.nwhpas (mgr.14227) 738 : audit [DBG] from='client.16130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: audit 2026-04-16T19:34:06.855535+0000 mgr.vm01.nwhpas (mgr.14227) 738 : audit [DBG] from='client.16130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: audit 2026-04-16T19:34:07.356985+0000 mon.vm01 (mon.0) 1190 : audit [DBG] from='client.? 192.168.123.101:0/2333466430' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: audit 2026-04-16T19:34:07.356985+0000 mon.vm01 (mon.0) 1190 : audit [DBG] from='client.? 
192.168.123.101:0/2333466430' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: audit 2026-04-16T19:34:07.579329+0000 mon.vm01 (mon.0) 1191 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:07 vm01 bash[28222]: audit 2026-04-16T19:34:07.579329+0000 mon.vm01 (mon.0) 1191 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:08.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:08 vm04 bash[34817]: audit 2026-04-16T19:34:07.073437+0000 mgr.vm01.nwhpas (mgr.14227) 739 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:08.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:08 vm04 bash[34817]: audit 2026-04-16T19:34:07.073437+0000 mgr.vm01.nwhpas (mgr.14227) 739 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:08 vm01 bash[28222]: audit 2026-04-16T19:34:07.073437+0000 mgr.vm01.nwhpas (mgr.14227) 739 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:08.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:08 vm01 bash[28222]: audit 2026-04-16T19:34:07.073437+0000 mgr.vm01.nwhpas (mgr.14227) 739 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:09 vm04 bash[34817]: cluster 2026-04-16T19:34:08.506368+0000 mgr.vm01.nwhpas (mgr.14227) 740 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:09 vm04 bash[34817]: cluster 2026-04-16T19:34:08.506368+0000 mgr.vm01.nwhpas (mgr.14227) 740 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:09.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:09 vm01 bash[28222]: cluster 2026-04-16T19:34:08.506368+0000 mgr.vm01.nwhpas (mgr.14227) 740 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:09.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:09 vm01 bash[28222]: cluster 2026-04-16T19:34:08.506368+0000 mgr.vm01.nwhpas (mgr.14227) 740 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-16T19:34:11.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:11 vm04 bash[34817]: cluster 2026-04-16T19:34:10.506919+0000 mgr.vm01.nwhpas (mgr.14227) 741 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 42 op/s 
2026-04-16T19:34:11.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:11 vm01 bash[28222]: cluster 2026-04-16T19:34:10.506919+0000 mgr.vm01.nwhpas (mgr.14227) 741 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 42 op/s
2026-04-16T19:34:12.586 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:34:12.801 INFO:teuthology.orchestra.run.vm01.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-16T19:34:12.801 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy  vm01  *:8000  running (10m)  4m ago     11m  116M     -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  2cf161ff473a
2026-04-16T19:34:12.801 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb  vm01  *:8001  running (4m)   4m ago     11m  92.4M    -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  eb0ffcec2d7b
2026-04-16T19:34:12.801 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa  vm04  *:8001  error          4m ago     11m  -        -
2026-04-16T19:34:12.801 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv  vm04  *:8000  running (11m)  4m ago     11m  121M     -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  775ed42085b6
2026-04-16T19:34:13.058 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:34:13.058 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:34:13.058 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:34:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:13 vm04 bash[34817]: cluster 2026-04-16T19:34:12.507348+0000 mgr.vm01.nwhpas (mgr.14227) 742 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 0 B/s wr, 17 op/s
2026-04-16T19:34:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:13 vm04 bash[34817]: audit 2026-04-16T19:34:12.564228+0000 mgr.vm01.nwhpas (mgr.14227) 743 : audit [DBG] from='client.16142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:13 vm04 bash[34817]: audit 2026-04-16T19:34:12.793315+0000 mgr.vm01.nwhpas (mgr.14227) 744 : audit [DBG] from='client.16146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:13 vm04 bash[34817]: audit 2026-04-16T19:34:13.053201+0000 mon.vm01 (mon.0) 1192 : audit [DBG] from='client.? 192.168.123.101:0/3948013095' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:13.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:13 vm01 bash[28222]: cluster 2026-04-16T19:34:12.507348+0000 mgr.vm01.nwhpas (mgr.14227) 742 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 0 B/s wr, 17 op/s
2026-04-16T19:34:13.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:13 vm01 bash[28222]: audit 2026-04-16T19:34:12.564228+0000 mgr.vm01.nwhpas (mgr.14227) 743 : audit [DBG] from='client.16142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:13.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:13 vm01 bash[28222]: audit 2026-04-16T19:34:12.793315+0000 mgr.vm01.nwhpas (mgr.14227) 744 : audit [DBG] from='client.16146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:13.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:13 vm01 bash[28222]: audit 2026-04-16T19:34:13.053201+0000 mon.vm01 (mon.0) 1192 : audit [DBG] from='client.? 192.168.123.101:0/3948013095' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:15.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:15 vm04 bash[34817]: cluster 2026-04-16T19:34:14.507859+0000 mgr.vm01.nwhpas (mgr.14227) 745 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:34:15.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:15 vm01 bash[28222]: cluster 2026-04-16T19:34:14.507859+0000 mgr.vm01.nwhpas (mgr.14227) 745 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:34:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:17 vm04 bash[34817]: cluster 2026-04-16T19:34:16.508286+0000 mgr.vm01.nwhpas (mgr.14227) 746 : cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:34:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:17 vm01 bash[28222]: cluster 2026-04-16T19:34:16.508286+0000 mgr.vm01.nwhpas (mgr.14227) 746 : cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:34:18.294 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to stop
2026-04-16T19:34:18.506 INFO:teuthology.orchestra.run.vm01.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-16T19:34:18.506 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy  vm01  *:8000  running (10m)  5m ago     11m  116M     -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  2cf161ff473a
2026-04-16T19:34:18.506 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb  vm01  *:8001  running (5m)   5m ago     11m  92.4M    -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  eb0ffcec2d7b
2026-04-16T19:34:18.506 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa  vm04  *:8001  error          4m ago     11m  -        -
2026-04-16T19:34:18.506 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv  vm04  *:8000  running (11m)  4m ago     11m  121M     -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  775ed42085b6
2026-04-16T19:34:18.769 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:34:18.769 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:34:18.769 INFO:teuthology.orchestra.run.vm01.stdout:    daemon rgw.foo.vm04.rpimxa on vm04 is in error state
2026-04-16T19:34:19.155 INFO:teuthology.orchestra.run.vm01.stderr:  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
2026-04-16T19:34:19.155 INFO:teuthology.orchestra.run.vm01.stderr:                                 Dload  Upload   Total   Spent    Left  Speed
2026-04-16T19:34:19.156 INFO:teuthology.orchestra.run.vm01.stderr:  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0  100   187    0   187    0     0   182k      0 --:--:-- --:--:-- --:--:--  182k
2026-04-16T19:34:19.376 INFO:teuthology.orchestra.run.vm01.stdout:anonymousScheduled to start rgw.foo.vm04.rpimxa on host 'vm04'
2026-04-16T19:34:19.628 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.rpimxa to start
2026-04-16T19:34:19.837 INFO:teuthology.orchestra.run.vm01.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-16T19:34:19.838 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy  vm01  *:8000  running (10m)  5m ago     11m  116M     -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  2cf161ff473a
2026-04-16T19:34:19.838 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb  vm01  *:8001  running (5m)   5m ago     11m  92.4M    -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  eb0ffcec2d7b
2026-04-16T19:34:19.838 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa  vm04  *:8001  error          4m ago     11m  -        -
2026-04-16T19:34:19.838 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv  vm04  *:8000  running (11m)  4m ago     11m  121M     -        20.2.0-21-gc03ba9ecf58  fc41d50a3963  775ed42085b6
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:18.268293+0000 mgr.vm01.nwhpas (mgr.14227) 747 : audit [DBG] from='client.16154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:18.498519+0000 mgr.vm01.nwhpas (mgr.14227) 748 : audit [DBG] from='client.16158 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: cluster 2026-04-16T19:34:18.508700+0000 mgr.vm01.nwhpas (mgr.14227) 749 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:18.764316+0000 mon.vm01 (mon.0) 1193 : audit [DBG] from='client.? 192.168.123.101:0/3321014278' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.364567+0000 mon.vm01 (mon.0) 1194 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.370766+0000 mon.vm01 (mon.0) 1195 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.371473+0000 mon.vm01 (mon.0) 1196 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.372494+0000 mon.vm01 (mon.0) 1197 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.372985+0000 mon.vm01 (mon.0) 1198 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.377905+0000 mon.vm01 (mon.0) 1199 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:19.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:19 vm01 bash[28222]: audit 2026-04-16T19:34:19.379266+0000 mon.vm01 (mon.0) 1200 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:34:20.046 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:18.268293+0000 mgr.vm01.nwhpas (mgr.14227) 747 : audit [DBG] from='client.16154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:18.498519+0000 mgr.vm01.nwhpas (mgr.14227) 748 : audit [DBG] from='client.16158 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: cluster 2026-04-16T19:34:18.508700+0000 mgr.vm01.nwhpas (mgr.14227) 749 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:18.764316+0000 mon.vm01 (mon.0) 1193 : audit [DBG] from='client.? 192.168.123.101:0/3321014278' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:18.764316+0000 mon.vm01 (mon.0) 1193 : audit [DBG] from='client.? 
192.168.123.101:0/3321014278' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.364567+0000 mon.vm01 (mon.0) 1194 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.364567+0000 mon.vm01 (mon.0) 1194 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.370766+0000 mon.vm01 (mon.0) 1195 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.370766+0000 mon.vm01 (mon.0) 1195 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.371473+0000 mon.vm01 (mon.0) 1196 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.371473+0000 mon.vm01 (mon.0) 1196 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.372494+0000 mon.vm01 (mon.0) 1197 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.372494+0000 mon.vm01 (mon.0) 1197 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.372985+0000 mon.vm01 (mon.0) 1198 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.372985+0000 mon.vm01 (mon.0) 1198 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.377905+0000 mon.vm01 (mon.0) 1199 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.377905+0000 mon.vm01 (mon.0) 1199 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.379266+0000 
mon.vm01 (mon.0) 1200 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:20.047 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:19 vm04 bash[34817]: audit 2026-04-16T19:34:19.379266+0000 mon.vm01 (mon.0) 1200 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:20.123 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:34:20.123 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:34:20.123 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.rpimxa on vm04 is in error state 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:19.355795+0000 mgr.vm01.nwhpas (mgr.14227) 750 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:19.355795+0000 mgr.vm01.nwhpas (mgr.14227) 750 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: cephadm 2026-04-16T19:34:19.356184+0000 mgr.vm01.nwhpas (mgr.14227) 751 : cephadm [INF] Schedule start daemon rgw.foo.vm04.rpimxa 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: cephadm 2026-04-16T19:34:19.356184+0000 mgr.vm01.nwhpas (mgr.14227) 751 : cephadm [INF] Schedule start daemon rgw.foo.vm04.rpimxa 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: cluster 2026-04-16T19:34:19.373983+0000 mgr.vm01.nwhpas (mgr.14227) 752 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 188 B/s rd, 376 B/s wr, 0 op/s 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: cluster 2026-04-16T19:34:19.373983+0000 mgr.vm01.nwhpas (mgr.14227) 752 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 188 B/s rd, 376 B/s wr, 0 op/s 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: cluster 2026-04-16T19:34:19.374157+0000 mgr.vm01.nwhpas (mgr.14227) 753 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 230 B/s rd, 461 B/s wr, 0 op/s 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: cluster 2026-04-16T19:34:19.374157+0000 mgr.vm01.nwhpas (mgr.14227) 753 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 230 B/s rd, 461 B/s wr, 0 op/s 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:19.605148+0000 mgr.vm01.nwhpas (mgr.14227) 754 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:19.605148+0000 mgr.vm01.nwhpas (mgr.14227) 754 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:19.829238+0000 mgr.vm01.nwhpas (mgr.14227) 755 : audit [DBG] from='client.16174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:19.829238+0000 mgr.vm01.nwhpas (mgr.14227) 755 : audit [DBG] from='client.16174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.076451+0000 mon.vm01 (mon.0) 1201 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.076451+0000 mon.vm01 (mon.0) 1201 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.083890+0000 mon.vm01 (mon.0) 1202 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.083890+0000 mon.vm01 (mon.0) 1202 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.085192+0000 mon.vm01 (mon.0) 1203 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.085192+0000 mon.vm01 (mon.0) 1203 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.117827+0000 mon.vm01 (mon.0) 1204 : audit [DBG] from='client.? 192.168.123.101:0/515758150' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:21.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:20 vm04 bash[34817]: audit 2026-04-16T19:34:20.117827+0000 mon.vm01 (mon.0) 1204 : audit [DBG] from='client.? 
192.168.123.101:0/515758150' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:19.355795+0000 mgr.vm01.nwhpas (mgr.14227) 750 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:19.355795+0000 mgr.vm01.nwhpas (mgr.14227) 750 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.rpimxa", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: cephadm 2026-04-16T19:34:19.356184+0000 mgr.vm01.nwhpas (mgr.14227) 751 : cephadm [INF] Schedule start daemon rgw.foo.vm04.rpimxa 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: cephadm 2026-04-16T19:34:19.356184+0000 mgr.vm01.nwhpas (mgr.14227) 751 : cephadm [INF] Schedule start daemon rgw.foo.vm04.rpimxa 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: cluster 2026-04-16T19:34:19.373983+0000 mgr.vm01.nwhpas (mgr.14227) 752 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 188 B/s rd, 376 B/s wr, 0 op/s 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: cluster 2026-04-16T19:34:19.373983+0000 mgr.vm01.nwhpas (mgr.14227) 752 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 188 B/s rd, 376 B/s wr, 0 op/s 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: cluster 2026-04-16T19:34:19.374157+0000 mgr.vm01.nwhpas (mgr.14227) 753 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 230 B/s rd, 461 B/s wr, 0 op/s 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: cluster 2026-04-16T19:34:19.374157+0000 mgr.vm01.nwhpas (mgr.14227) 753 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 230 B/s rd, 461 B/s wr, 0 op/s 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:19.605148+0000 mgr.vm01.nwhpas (mgr.14227) 754 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:19.605148+0000 mgr.vm01.nwhpas (mgr.14227) 754 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:19.829238+0000 mgr.vm01.nwhpas (mgr.14227) 755 : audit [DBG] from='client.16174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:19.829238+0000 
mgr.vm01.nwhpas (mgr.14227) 755 : audit [DBG] from='client.16174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.076451+0000 mon.vm01 (mon.0) 1201 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.076451+0000 mon.vm01 (mon.0) 1201 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.083890+0000 mon.vm01 (mon.0) 1202 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.083890+0000 mon.vm01 (mon.0) 1202 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.085192+0000 mon.vm01 (mon.0) 1203 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.085192+0000 mon.vm01 (mon.0) 1203 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.117827+0000 mon.vm01 (mon.0) 1204 : audit [DBG] from='client.? 192.168.123.101:0/515758150' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:20 vm01 bash[28222]: audit 2026-04-16T19:34:20.117827+0000 mon.vm01 (mon.0) 1204 : audit [DBG] from='client.? 
192.168.123.101:0/515758150' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:23.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:22 vm04 bash[34817]: cluster 2026-04-16T19:34:21.374645+0000 mgr.vm01.nwhpas (mgr.14227) 756 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 461 B/s wr, 49 op/s 2026-04-16T19:34:23.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:22 vm04 bash[34817]: cluster 2026-04-16T19:34:21.374645+0000 mgr.vm01.nwhpas (mgr.14227) 756 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 461 B/s wr, 49 op/s 2026-04-16T19:34:23.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:22 vm04 bash[34817]: audit 2026-04-16T19:34:22.579774+0000 mon.vm01 (mon.0) 1205 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:23.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:22 vm04 bash[34817]: audit 2026-04-16T19:34:22.579774+0000 mon.vm01 (mon.0) 1205 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:22 vm01 bash[28222]: cluster 2026-04-16T19:34:21.374645+0000 mgr.vm01.nwhpas (mgr.14227) 756 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 461 B/s wr, 49 op/s 2026-04-16T19:34:23.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:22 vm01 bash[28222]: cluster 2026-04-16T19:34:21.374645+0000 mgr.vm01.nwhpas (mgr.14227) 756 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 461 B/s wr, 49 op/s 2026-04-16T19:34:23.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:22 vm01 bash[28222]: audit 2026-04-16T19:34:22.579774+0000 mon.vm01 (mon.0) 1205 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:23.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:22 vm01 bash[28222]: audit 2026-04-16T19:34:22.579774+0000 mon.vm01 (mon.0) 1205 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:34:24.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:24 vm04 bash[34817]: cluster 2026-04-16T19:34:23.375062+0000 mgr.vm01.nwhpas (mgr.14227) 757 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 230 B/s wr, 81 op/s 2026-04-16T19:34:24.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:24 vm04 bash[34817]: cluster 2026-04-16T19:34:23.375062+0000 mgr.vm01.nwhpas (mgr.14227) 757 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 230 B/s wr, 81 op/s 2026-04-16T19:34:24.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:24 vm01 bash[28222]: cluster 2026-04-16T19:34:23.375062+0000 mgr.vm01.nwhpas (mgr.14227) 757 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 230 B/s wr, 81 op/s 
2026-04-16T19:34:24.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:24 vm01 bash[28222]: cluster 2026-04-16T19:34:23.375062+0000 mgr.vm01.nwhpas (mgr.14227) 757 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 230 B/s wr, 81 op/s 2026-04-16T19:34:25.354 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5s) 0s ago 11m 92.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:34:25.564 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop rgw.foo.vm04.uxumrv on host 'vm04' 2026-04-16T19:34:25.818 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:34:26.033 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:34:26.033 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:34:26.033 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:34:26.033 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5s) 0s ago 11m 92.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:34:26.033 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (11m) 0s ago 11m 130M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6 2026-04-16T19:34:26.312 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.191625+0000 mon.vm01 (mon.0) 1206 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.191625+0000 mon.vm01 (mon.0) 1206 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.199259+0000 mon.vm01 (mon.0) 1207 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.199259+0000 mon.vm01 (mon.0) 1207 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.200138+0000 mon.vm01 (mon.0) 1208 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.200138+0000 mon.vm01 (mon.0) 1208 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.200730+0000 mon.vm01 (mon.0) 1209 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 
2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.200730+0000 mon.vm01 (mon.0) 1209 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: cluster 2026-04-16T19:34:25.201947+0000 mgr.vm01.nwhpas (mgr.14227) 758 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 235 B/s wr, 147 op/s 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: cluster 2026-04-16T19:34:25.201947+0000 mgr.vm01.nwhpas (mgr.14227) 758 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 235 B/s wr, 147 op/s 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.204689+0000 mon.vm01 (mon.0) 1210 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.204689+0000 mon.vm01 (mon.0) 1210 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.206202+0000 mon.vm01 (mon.0) 1211 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.206202+0000 mon.vm01 (mon.0) 1211 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.327111+0000 mgr.vm01.nwhpas (mgr.14227) 759 : audit [DBG] from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.327111+0000 mgr.vm01.nwhpas (mgr.14227) 759 : audit [DBG] from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.547541+0000 mgr.vm01.nwhpas (mgr.14227) 760 : audit [DBG] from='client.16192 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.uxumrv", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.547541+0000 mgr.vm01.nwhpas (mgr.14227) 760 : audit [DBG] from='client.16192 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.uxumrv", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: cephadm 2026-04-16T19:34:25.548121+0000 mgr.vm01.nwhpas (mgr.14227) 761 : cephadm [INF] Schedule 
stop daemon rgw.foo.vm04.uxumrv 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: cephadm 2026-04-16T19:34:25.548121+0000 mgr.vm01.nwhpas (mgr.14227) 761 : cephadm [INF] Schedule stop daemon rgw.foo.vm04.uxumrv 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.554835+0000 mon.vm01 (mon.0) 1212 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.554835+0000 mon.vm01 (mon.0) 1212 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.559349+0000 mon.vm01 (mon.0) 1213 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.559349+0000 mon.vm01 (mon.0) 1213 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.559991+0000 mon.vm01 (mon.0) 1214 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.559991+0000 mon.vm01 (mon.0) 1214 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.561047+0000 mon.vm01 (mon.0) 1215 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.561047+0000 mon.vm01 (mon.0) 1215 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.561598+0000 mon.vm01 (mon.0) 1216 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.561598+0000 mon.vm01 (mon.0) 1216 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.565713+0000 mon.vm01 (mon.0) 1217 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.565713+0000 mon.vm01 (mon.0) 1217 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' 
entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.567263+0000 mon.vm01 (mon.0) 1218 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.567263+0000 mon.vm01 (mon.0) 1218 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.793615+0000 mgr.vm01.nwhpas (mgr.14227) 762 : audit [DBG] from='client.16196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.463 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:26 vm04 bash[34817]: audit 2026-04-16T19:34:25.793615+0000 mgr.vm01.nwhpas (mgr.14227) 762 : audit [DBG] from='client.16196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.191625+0000 mon.vm01 (mon.0) 1206 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.191625+0000 mon.vm01 (mon.0) 1206 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.199259+0000 mon.vm01 (mon.0) 1207 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.199259+0000 mon.vm01 (mon.0) 1207 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.200138+0000 mon.vm01 (mon.0) 1208 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.200138+0000 mon.vm01 (mon.0) 1208 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.200730+0000 mon.vm01 (mon.0) 1209 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.200730+0000 mon.vm01 (mon.0) 1209 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 
bash[28222]: cluster 2026-04-16T19:34:25.201947+0000 mgr.vm01.nwhpas (mgr.14227) 758 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 235 B/s wr, 147 op/s 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: cluster 2026-04-16T19:34:25.201947+0000 mgr.vm01.nwhpas (mgr.14227) 758 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 235 B/s wr, 147 op/s 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.204689+0000 mon.vm01 (mon.0) 1210 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.204689+0000 mon.vm01 (mon.0) 1210 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.206202+0000 mon.vm01 (mon.0) 1211 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.206202+0000 mon.vm01 (mon.0) 1211 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.327111+0000 mgr.vm01.nwhpas (mgr.14227) 759 : audit [DBG] from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.327111+0000 mgr.vm01.nwhpas (mgr.14227) 759 : audit [DBG] from='client.16188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.547541+0000 mgr.vm01.nwhpas (mgr.14227) 760 : audit [DBG] from='client.16192 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.uxumrv", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.547541+0000 mgr.vm01.nwhpas (mgr.14227) 760 : audit [DBG] from='client.16192 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm04.uxumrv", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: cephadm 2026-04-16T19:34:25.548121+0000 mgr.vm01.nwhpas (mgr.14227) 761 : cephadm [INF] Schedule stop daemon rgw.foo.vm04.uxumrv 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: cephadm 2026-04-16T19:34:25.548121+0000 mgr.vm01.nwhpas (mgr.14227) 761 : cephadm [INF] Schedule stop daemon rgw.foo.vm04.uxumrv 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 
2026-04-16T19:34:25.554835+0000 mon.vm01 (mon.0) 1212 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.554835+0000 mon.vm01 (mon.0) 1212 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.559349+0000 mon.vm01 (mon.0) 1213 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.559349+0000 mon.vm01 (mon.0) 1213 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.559991+0000 mon.vm01 (mon.0) 1214 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.559991+0000 mon.vm01 (mon.0) 1214 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.561047+0000 mon.vm01 (mon.0) 1215 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.561047+0000 mon.vm01 (mon.0) 1215 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.561598+0000 mon.vm01 (mon.0) 1216 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.561598+0000 mon.vm01 (mon.0) 1216 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.565713+0000 mon.vm01 (mon.0) 1217 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.565713+0000 mon.vm01 (mon.0) 1217 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.567263+0000 mon.vm01 (mon.0) 1218 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.464 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.567263+0000 mon.vm01 (mon.0) 1218 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.793615+0000 mgr.vm01.nwhpas (mgr.14227) 762 : audit [DBG] from='client.16196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:26.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:26 vm01 bash[28222]: audit 2026-04-16T19:34:25.793615+0000 mgr.vm01.nwhpas (mgr.14227) 762 : audit [DBG] from='client.16196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: audit 2026-04-16T19:34:26.023984+0000 mgr.vm01.nwhpas (mgr.14227) 763 : audit [DBG] from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: audit 2026-04-16T19:34:26.023984+0000 mgr.vm01.nwhpas (mgr.14227) 763 : audit [DBG] from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: cluster 2026-04-16T19:34:26.197845+0000 mon.vm01 (mon.0) 1219 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: cluster 2026-04-16T19:34:26.197845+0000 mon.vm01 (mon.0) 1219 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: cluster 2026-04-16T19:34:26.197863+0000 mon.vm01 (mon.0) 1220 : cluster [INF] Cluster is now healthy 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: cluster 2026-04-16T19:34:26.197863+0000 mon.vm01 (mon.0) 1220 : cluster [INF] Cluster is now healthy 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: audit 2026-04-16T19:34:26.307032+0000 mon.vm01 (mon.0) 1221 : audit [DBG] from='client.? 192.168.123.101:0/261375359' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:27.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:27 vm04 bash[34817]: audit 2026-04-16T19:34:26.307032+0000 mon.vm01 (mon.0) 1221 : audit [DBG] from='client.? 
192.168.123.101:0/261375359' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: audit 2026-04-16T19:34:26.023984+0000 mgr.vm01.nwhpas (mgr.14227) 763 : audit [DBG] from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: audit 2026-04-16T19:34:26.023984+0000 mgr.vm01.nwhpas (mgr.14227) 763 : audit [DBG] from='client.16200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: cluster 2026-04-16T19:34:26.197845+0000 mon.vm01 (mon.0) 1219 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: cluster 2026-04-16T19:34:26.197845+0000 mon.vm01 (mon.0) 1219 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s)) 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: cluster 2026-04-16T19:34:26.197863+0000 mon.vm01 (mon.0) 1220 : cluster [INF] Cluster is now healthy 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: cluster 2026-04-16T19:34:26.197863+0000 mon.vm01 (mon.0) 1220 : cluster [INF] Cluster is now healthy 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: audit 2026-04-16T19:34:26.307032+0000 mon.vm01 (mon.0) 1221 : audit [DBG] from='client.? 192.168.123.101:0/261375359' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:34:27.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:27 vm01 bash[28222]: audit 2026-04-16T19:34:26.307032+0000 mon.vm01 (mon.0) 1221 : audit [DBG] from='client.? 
192.168.123.101:0/261375359' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:28.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:28 vm04 bash[34817]: cluster 2026-04-16T19:34:27.202424+0000 mgr.vm01.nwhpas (mgr.14227) 764 : cluster [DBG] pgmap v419: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 KiB/s rd, 0 B/s wr, 150 op/s
2026-04-16T19:34:28.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:28 vm01 bash[28222]: cluster 2026-04-16T19:34:27.202424+0000 mgr.vm01.nwhpas (mgr.14227) 764 : cluster [DBG] pgmap v419: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 92 KiB/s rd, 0 B/s wr, 150 op/s
2026-04-16T19:34:30.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:30 vm04 bash[34817]: cluster 2026-04-16T19:34:29.202929+0000 mgr.vm01.nwhpas (mgr.14227) 765 : cluster [DBG] pgmap v420: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 81 KiB/s rd, 416 B/s wr, 133 op/s
2026-04-16T19:34:30.712 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:30 vm01 bash[28222]: cluster 2026-04-16T19:34:29.202929+0000 mgr.vm01.nwhpas (mgr.14227) 765 : cluster [DBG] pgmap v420: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 81 KiB/s rd, 416 B/s wr, 133 op/s
2026-04-16T19:34:31.580 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:34:31.773 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:34:31.773 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:34:31.773 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:34:31.773 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (11s) 6s ago 11m 92.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:34:31.773 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (11m) 6s ago 11m 130M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:34:32.048 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:34:32.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:32 vm04 bash[34817]: cluster 2026-04-16T19:34:31.203382+0000 mgr.vm01.nwhpas (mgr.14227) 766 : cluster [DBG] pgmap v421: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 346 B/s wr, 110 op/s
2026-04-16T19:34:32.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:32 vm04 bash[34817]: audit 2026-04-16T19:34:31.550843+0000 mgr.vm01.nwhpas (mgr.14227) 767 : audit [DBG] from='client.16208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:32.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:32 vm04 bash[34817]: audit 2026-04-16T19:34:31.765024+0000 mgr.vm01.nwhpas (mgr.14227) 768 : audit [DBG] from='client.16212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:32.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:32 vm04 bash[34817]: audit 2026-04-16T19:34:32.042666+0000 mon.vm01 (mon.0) 1222 : audit [DBG] from='client.? 192.168.123.101:0/1708316969' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:32.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:32 vm01 bash[28222]: cluster 2026-04-16T19:34:31.203382+0000 mgr.vm01.nwhpas (mgr.14227) 766 : cluster [DBG] pgmap v421: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 346 B/s wr, 110 op/s
2026-04-16T19:34:32.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:32 vm01 bash[28222]: audit 2026-04-16T19:34:31.550843+0000 mgr.vm01.nwhpas (mgr.14227) 767 : audit [DBG] from='client.16208 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:32.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:32 vm01 bash[28222]: audit 2026-04-16T19:34:31.765024+0000 mgr.vm01.nwhpas (mgr.14227) 768 : audit [DBG] from='client.16212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:32.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:32 vm01 bash[28222]: audit 2026-04-16T19:34:32.042666+0000 mon.vm01 (mon.0) 1222 : audit [DBG] from='client.? 192.168.123.101:0/1708316969' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:34.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:34 vm01 bash[28222]: cluster 2026-04-16T19:34:33.203832+0000 mgr.vm01.nwhpas (mgr.14227) 769 : cluster [DBG] pgmap v422: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 346 B/s wr, 74 op/s
2026-04-16T19:34:34.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:34 vm04 bash[34817]: cluster 2026-04-16T19:34:33.203832+0000 mgr.vm01.nwhpas (mgr.14227) 769 : cluster [DBG] pgmap v422: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 346 B/s wr, 74 op/s
2026-04-16T19:34:36.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:36 vm04 bash[34817]: cluster 2026-04-16T19:34:35.204288+0000 mgr.vm01.nwhpas (mgr.14227) 770 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 346 B/s wr, 49 op/s
2026-04-16T19:34:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:36 vm01 bash[28222]: cluster 2026-04-16T19:34:35.204288+0000 mgr.vm01.nwhpas (mgr.14227) 770 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 346 B/s wr, 49 op/s
2026-04-16T19:34:37.280 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:34:37.476 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:34:37.476 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:34:37.476 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:34:37.476 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (17s) 12s ago 11m 92.3M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:34:37.476 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (11m) 12s ago 11m 130M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 775ed42085b6
2026-04-16T19:34:37.723 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:34:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:37 vm04 bash[34817]: audit 2026-04-16T19:34:36.834583+0000 mon.vm01 (mon.0) 1223 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:37 vm04 bash[34817]: audit 2026-04-16T19:34:36.841459+0000 mon.vm01 (mon.0) 1224 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:37 vm04 bash[34817]: audit 2026-04-16T19:34:36.842913+0000 mon.vm01 (mon.0) 1225 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:34:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:37 vm04 bash[34817]: audit 2026-04-16T19:34:37.584400+0000 mon.vm01 (mon.0) 1226 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:37 vm04 bash[34817]: audit 2026-04-16T19:34:37.585135+0000 mon.vm01 (mon.0) 1227 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:34:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:37 vm04 bash[34817]: audit 2026-04-16T19:34:37.717566+0000 mon.vm01 (mon.0) 1228 : audit [DBG] from='client.? 192.168.123.101:0/3042874105' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:37 vm01 bash[28222]: audit 2026-04-16T19:34:36.834583+0000 mon.vm01 (mon.0) 1223 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:37 vm01 bash[28222]: audit 2026-04-16T19:34:36.841459+0000 mon.vm01 (mon.0) 1224 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:37 vm01 bash[28222]: audit 2026-04-16T19:34:36.842913+0000 mon.vm01 (mon.0) 1225 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:34:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:37 vm01 bash[28222]: audit 2026-04-16T19:34:37.584400+0000 mon.vm01 (mon.0) 1226 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:37 vm01 bash[28222]: audit 2026-04-16T19:34:37.585135+0000 mon.vm01 (mon.0) 1227 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:34:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:37 vm01 bash[28222]: audit 2026-04-16T19:34:37.717566+0000 mon.vm01 (mon.0) 1228 : audit [DBG] from='client.? 192.168.123.101:0/3042874105' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:39.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:38 vm04 bash[34817]: cluster 2026-04-16T19:34:37.204732+0000 mgr.vm01.nwhpas (mgr.14227) 771 : cluster [DBG] pgmap v424: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 341 B/s wr, 2 op/s
2026-04-16T19:34:39.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:38 vm04 bash[34817]: audit 2026-04-16T19:34:37.256190+0000 mgr.vm01.nwhpas (mgr.14227) 772 : audit [DBG] from='client.16220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:39.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:38 vm04 bash[34817]: audit 2026-04-16T19:34:37.466576+0000 mgr.vm01.nwhpas (mgr.14227) 773 : audit [DBG] from='client.16224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:38 vm01 bash[28222]: cluster 2026-04-16T19:34:37.204732+0000 mgr.vm01.nwhpas (mgr.14227) 771 : cluster [DBG] pgmap v424: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 341 B/s wr, 2 op/s
2026-04-16T19:34:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:38 vm01 bash[28222]: audit 2026-04-16T19:34:37.256190+0000 mgr.vm01.nwhpas (mgr.14227) 772 : audit [DBG] from='client.16220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:39.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:38 vm01 bash[28222]: audit 2026-04-16T19:34:37.466576+0000 mgr.vm01.nwhpas (mgr.14227) 773 : audit [DBG] from='client.16224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:41.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:40 vm04 bash[34817]: cluster 2026-04-16T19:34:39.205322+0000 mgr.vm01.nwhpas (mgr.14227) 774 : cluster [DBG] pgmap v425: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:41.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:40 vm01 bash[28222]: cluster 2026-04-16T19:34:39.205322+0000 mgr.vm01.nwhpas (mgr.14227) 774 : cluster [DBG] pgmap v425: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:42.949 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:34:43.149 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:34:43.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:34:43.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:34:43.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (23s) 1s ago 11m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:34:43.149 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 1s ago 11m - -
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: cluster 2026-04-16T19:34:41.205717+0000 mgr.vm01.nwhpas (mgr.14227) 775 : cluster [DBG] pgmap v426: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: audit 2026-04-16T19:34:41.748660+0000 mon.vm01 (mon.0) 1229 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: audit 2026-04-16T19:34:41.753766+0000 mon.vm01 (mon.0) 1230 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: audit 2026-04-16T19:34:42.117567+0000 mon.vm01 (mon.0) 1231 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: audit 2026-04-16T19:34:42.118178+0000 mon.vm01 (mon.0) 1232 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: audit 2026-04-16T19:34:42.122709+0000 mon.vm01 (mon.0) 1233 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:42 vm04 bash[34817]: audit 2026-04-16T19:34:42.124410+0000 mon.vm01 (mon.0) 1234 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:34:43.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: cluster 2026-04-16T19:34:41.205717+0000 mgr.vm01.nwhpas (mgr.14227) 775 : cluster [DBG] pgmap v426: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:43.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: audit 2026-04-16T19:34:41.748660+0000 mon.vm01 (mon.0) 1229 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:43.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: audit 2026-04-16T19:34:41.753766+0000 mon.vm01 (mon.0) 1230 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:43.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: audit 2026-04-16T19:34:42.117567+0000 mon.vm01 (mon.0) 1231 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:34:43.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: audit 2026-04-16T19:34:42.118178+0000 mon.vm01 (mon.0) 1232 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:34:43.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: audit 2026-04-16T19:34:42.122709+0000 mon.vm01 (mon.0) 1233 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:43.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:42 vm01 bash[28222]: audit 2026-04-16T19:34:42.124410+0000 mon.vm01 (mon.0) 1234 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:34:43.399 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:34:43.399 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:34:43.399 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:34:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:43 vm04 bash[34817]: cluster 2026-04-16T19:34:42.119157+0000 mgr.vm01.nwhpas (mgr.14227) 776 : cluster [DBG] pgmap v427: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:43 vm04 bash[34817]: cluster 2026-04-16T19:34:42.853156+0000 mon.vm01 (mon.0) 1235 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-16T19:34:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:43 vm04 bash[34817]: audit 2026-04-16T19:34:42.926525+0000 mgr.vm01.nwhpas (mgr.14227) 777 : audit [DBG] from='client.16232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:43 vm04 bash[34817]: audit 2026-04-16T19:34:43.394114+0000 mon.vm01 (mon.0) 1236 : audit [DBG] from='client.? 192.168.123.101:0/656020649' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:44.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:43 vm01 bash[28222]: cluster 2026-04-16T19:34:42.119157+0000 mgr.vm01.nwhpas (mgr.14227) 776 : cluster [DBG] pgmap v427: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:44.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:43 vm01 bash[28222]: cluster 2026-04-16T19:34:42.853156+0000 mon.vm01 (mon.0) 1235 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-16T19:34:44.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:43 vm01 bash[28222]: audit 2026-04-16T19:34:42.926525+0000 mgr.vm01.nwhpas (mgr.14227) 777 : audit [DBG] from='client.16232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:44.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:43 vm01 bash[28222]: audit 2026-04-16T19:34:43.394114+0000 mon.vm01 (mon.0) 1236 : audit [DBG] from='client.? 192.168.123.101:0/656020649' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:45.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:44 vm04 bash[34817]: audit 2026-04-16T19:34:43.141363+0000 mgr.vm01.nwhpas (mgr.14227) 778 : audit [DBG] from='client.16236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:45.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:44 vm01 bash[28222]: audit 2026-04-16T19:34:43.141363+0000 mgr.vm01.nwhpas (mgr.14227) 778 : audit [DBG] from='client.16236 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:45 vm04 bash[34817]: cluster 2026-04-16T19:34:44.119598+0000 mgr.vm01.nwhpas (mgr.14227) 779 : cluster [DBG] pgmap v428: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:46.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:45 vm01 bash[28222]: cluster 2026-04-16T19:34:44.119598+0000 mgr.vm01.nwhpas (mgr.14227) 779 : cluster [DBG] pgmap v428: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:47 vm04 bash[34817]: cluster 2026-04-16T19:34:46.119962+0000 mgr.vm01.nwhpas (mgr.14227) 780 : cluster [DBG] pgmap v429: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:48.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:47 vm01 bash[28222]: cluster 2026-04-16T19:34:46.119962+0000 mgr.vm01.nwhpas (mgr.14227) 780 : cluster [DBG] pgmap v429: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:34:48.625 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:34:48.812 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:34:48.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:34:48.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:34:48.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (28s) 7s ago 11m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:34:48.813 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 7s ago 11m - -
2026-04-16T19:34:49.066 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:34:49.066 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:34:49.066 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:34:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:49 vm04 bash[34817]: cluster 2026-04-16T19:34:48.120322+0000 mgr.vm01.nwhpas (mgr.14227) 781 : cluster [DBG] pgmap v430: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s
2026-04-16T19:34:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:49 vm04 bash[34817]: audit 2026-04-16T19:34:48.602795+0000 mgr.vm01.nwhpas (mgr.14227) 782 : audit [DBG] from='client.16244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:49 vm04 bash[34817]: audit 2026-04-16T19:34:48.804844+0000 mgr.vm01.nwhpas (mgr.14227) 783 : audit [DBG] from='client.16248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:49 vm04 bash[34817]: audit 2026-04-16T19:34:49.061575+0000 mon.vm01 (mon.0) 1237 : audit [DBG] from='client.? 192.168.123.101:0/3000869139' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:49 vm01 bash[28222]: cluster 2026-04-16T19:34:48.120322+0000 mgr.vm01.nwhpas (mgr.14227) 781 : cluster [DBG] pgmap v430: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s
2026-04-16T19:34:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:49 vm01 bash[28222]: audit 2026-04-16T19:34:48.602795+0000 mgr.vm01.nwhpas (mgr.14227) 782 : audit [DBG] from='client.16244 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:49 vm01 bash[28222]: audit 2026-04-16T19:34:48.804844+0000 mgr.vm01.nwhpas (mgr.14227) 783 : audit [DBG] from='client.16248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:49 vm01 bash[28222]: audit 2026-04-16T19:34:49.061575+0000 mon.vm01 (mon.0) 1237 : audit [DBG] from='client.? 192.168.123.101:0/3000869139' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:51 vm04 bash[34817]: cluster 2026-04-16T19:34:50.120756+0000 mgr.vm01.nwhpas (mgr.14227) 784 : cluster [DBG] pgmap v431: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s
2026-04-16T19:34:52.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:51 vm01 bash[28222]: cluster 2026-04-16T19:34:50.120756+0000 mgr.vm01.nwhpas (mgr.14227) 784 : cluster [DBG] pgmap v431: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s
2026-04-16T19:34:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:53 vm04 bash[34817]: cluster 2026-04-16T19:34:52.121167+0000 mgr.vm01.nwhpas (mgr.14227) 785 : cluster [DBG] pgmap v432: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s
2026-04-16T19:34:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:53 vm04 bash[34817]: audit 2026-04-16T19:34:52.587667+0000 mon.vm01 (mon.0) 1238 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:53 vm04 bash[34817]: audit 2026-04-16T19:34:52.588248+0000 mon.vm01 (mon.0) 1239 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:34:53.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:53 vm01 bash[28222]: cluster 2026-04-16T19:34:52.121167+0000 mgr.vm01.nwhpas (mgr.14227) 785 : cluster [DBG] pgmap v432: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s
2026-04-16T19:34:53.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:53 vm01 bash[28222]: audit 2026-04-16T19:34:52.587667+0000 mon.vm01 (mon.0) 1238 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:34:53.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:53 vm01 bash[28222]: audit 2026-04-16T19:34:52.588248+0000 mon.vm01 (mon.0) 1239 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:34:54.291 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:34:54.475 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:34:54.475 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (10m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:34:54.475 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:34:54.475 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (34s) 12s ago 11m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:34:54.475 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 12s ago 11m - -
2026-04-16T19:34:54.723 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:34:54.723 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:34:54.723 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:34:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:55 vm04 bash[34817]: cluster 2026-04-16T19:34:54.121636+0000 mgr.vm01.nwhpas (mgr.14227) 786 : cluster [DBG] pgmap v433: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:55 vm04 bash[34817]: audit 2026-04-16T19:34:54.269608+0000 mgr.vm01.nwhpas (mgr.14227) 787 : audit [DBG] from='client.16256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:55 vm04 bash[34817]: audit 2026-04-16T19:34:54.465497+0000 mgr.vm01.nwhpas (mgr.14227) 788 : audit [DBG] from='client.16260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:55 vm04 bash[34817]: audit 2026-04-16T19:34:54.718450+0000 mon.vm01 (mon.0) 1240 : audit [DBG] from='client.? 192.168.123.101:0/880614754' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:55 vm01 bash[28222]: cluster 2026-04-16T19:34:54.121636+0000 mgr.vm01.nwhpas (mgr.14227) 786 : cluster [DBG] pgmap v433: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:55 vm01 bash[28222]: audit 2026-04-16T19:34:54.269608+0000 mgr.vm01.nwhpas (mgr.14227) 787 : audit [DBG] from='client.16256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:55 vm01 bash[28222]: audit 2026-04-16T19:34:54.465497+0000 mgr.vm01.nwhpas (mgr.14227) 788 : audit [DBG] from='client.16260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:34:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:55 vm01 bash[28222]: audit 2026-04-16T19:34:54.718450+0000 mon.vm01 (mon.0) 1240 : audit [DBG] from='client.? 192.168.123.101:0/880614754' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:34:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:57 vm04 bash[34817]: cluster 2026-04-16T19:34:56.122094+0000 mgr.vm01.nwhpas (mgr.14227) 789 : cluster [DBG] pgmap v434: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:57.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:57 vm01 bash[28222]: cluster 2026-04-16T19:34:56.122094+0000 mgr.vm01.nwhpas (mgr.14227) 789 : cluster [DBG] pgmap v434: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:34:59.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:34:59 vm04 bash[34817]: cluster 2026-04-16T19:34:58.122571+0000 mgr.vm01.nwhpas (mgr.14227) 790 : cluster [DBG] pgmap v435: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:34:59.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:34:59 vm01 bash[28222]: cluster 2026-04-16T19:34:58.122571+0000 mgr.vm01.nwhpas (mgr.14227) 790 : cluster [DBG] pgmap v435: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:34:59.980 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:35:00.182 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:35:00.182 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 5m ago 11m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:35:00.182 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 11m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:35:00.182 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (40s) 18s ago 11m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:35:00.182 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 18s ago 11m - -
2026-04-16T19:35:00.415 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:35:00.415 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:35:00.415 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:35:00.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:00 vm04 bash[34817]: audit 2026-04-16T19:35:00.409796+0000 mon.vm01 (mon.0) 1241 : audit [DBG] from='client.? 192.168.123.101:0/1748230239' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:00.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:00 vm01 bash[28222]: audit 2026-04-16T19:35:00.409796+0000 mon.vm01 (mon.0) 1241 : audit [DBG] from='client.? 192.168.123.101:0/1748230239' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:01.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:01 vm04 bash[34817]: audit 2026-04-16T19:34:59.951220+0000 mgr.vm01.nwhpas (mgr.14227) 791 : audit [DBG] from='client.16268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:01.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:01 vm04 bash[34817]: cluster 2026-04-16T19:35:00.123132+0000 mgr.vm01.nwhpas (mgr.14227) 792 : cluster [DBG] pgmap v436: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:01.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:01 vm04 bash[34817]: audit 2026-04-16T19:35:00.174524+0000 mgr.vm01.nwhpas (mgr.14227) 793 : audit [DBG] from='client.16272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:01.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:01 vm01 bash[28222]: audit 2026-04-16T19:34:59.951220+0000 mgr.vm01.nwhpas (mgr.14227) 791 : audit [DBG] from='client.16268 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:01.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:01 vm01 bash[28222]: cluster 2026-04-16T19:35:00.123132+0000 mgr.vm01.nwhpas (mgr.14227) 792 : cluster [DBG] pgmap v436: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:01.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:01 vm01 bash[28222]: audit 2026-04-16T19:35:00.174524+0000 mgr.vm01.nwhpas (mgr.14227) 793 : audit [DBG] from='client.16272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:03.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:03 vm04 bash[34817]: cluster 2026-04-16T19:35:02.123638+0000 mgr.vm01.nwhpas (mgr.14227) 794 : cluster [DBG] pgmap v437: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:03.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:03 vm01 bash[28222]: cluster 2026-04-16T19:35:02.123638+0000 mgr.vm01.nwhpas (mgr.14227) 794 : cluster [DBG] pgmap v437: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:05.629 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:35:05.821 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:35:05.821 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 5m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:35:05.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:35:05.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (45s) 24s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:35:05.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 24s ago 12m - -
2026-04-16T19:35:05.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:05 vm04 bash[34817]: cluster 2026-04-16T19:35:04.124099+0000 mgr.vm01.nwhpas (mgr.14227) 795 : cluster [DBG] pgmap v438: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:05.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:05 vm01 bash[28222]: cluster 2026-04-16T19:35:04.124099+0000 mgr.vm01.nwhpas (mgr.14227) 795 : cluster [DBG] pgmap v438: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:06.057 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:35:06.057 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:35:06.057 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:35:06.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:06 vm04 bash[34817]: audit 2026-04-16T19:35:05.607852+0000 mgr.vm01.nwhpas (mgr.14227) 796 : audit [DBG] from='client.16280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:06.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:06 vm04 bash[34817]: audit 2026-04-16T19:35:05.814119+0000 mgr.vm01.nwhpas (mgr.14227) 797 : audit [DBG] from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:06.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:06 vm04 bash[34817]: audit 2026-04-16T19:35:06.052193+0000 mon.vm01 (mon.0) 1242 : audit [DBG] from='client.? 
192.168.123.101:0/2506873231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:06.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:06 vm04 bash[34817]: audit 2026-04-16T19:35:06.052193+0000 mon.vm01 (mon.0) 1242 : audit [DBG] from='client.? 192.168.123.101:0/2506873231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:06.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:06 vm01 bash[28222]: audit 2026-04-16T19:35:05.607852+0000 mgr.vm01.nwhpas (mgr.14227) 796 : audit [DBG] from='client.16280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:06.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:06 vm01 bash[28222]: audit 2026-04-16T19:35:05.607852+0000 mgr.vm01.nwhpas (mgr.14227) 796 : audit [DBG] from='client.16280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:06.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:06 vm01 bash[28222]: audit 2026-04-16T19:35:05.814119+0000 mgr.vm01.nwhpas (mgr.14227) 797 : audit [DBG] from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:06.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:06 vm01 bash[28222]: audit 2026-04-16T19:35:05.814119+0000 mgr.vm01.nwhpas (mgr.14227) 797 : audit [DBG] from='client.16284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:06.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:06 vm01 bash[28222]: audit 2026-04-16T19:35:06.052193+0000 mon.vm01 (mon.0) 1242 : audit [DBG] from='client.? 192.168.123.101:0/2506873231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:06.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:06 vm01 bash[28222]: audit 2026-04-16T19:35:06.052193+0000 mon.vm01 (mon.0) 1242 : audit [DBG] from='client.? 
192.168.123.101:0/2506873231' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:07 vm04 bash[34817]: cluster 2026-04-16T19:35:06.124645+0000 mgr.vm01.nwhpas (mgr.14227) 798 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:07 vm04 bash[34817]: cluster 2026-04-16T19:35:06.124645+0000 mgr.vm01.nwhpas (mgr.14227) 798 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:07 vm04 bash[34817]: audit 2026-04-16T19:35:07.580293+0000 mon.vm01 (mon.0) 1243 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:07 vm04 bash[34817]: audit 2026-04-16T19:35:07.580293+0000 mon.vm01 (mon.0) 1243 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:07.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:07 vm01 bash[28222]: cluster 2026-04-16T19:35:06.124645+0000 mgr.vm01.nwhpas (mgr.14227) 798 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:07.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:07 vm01 bash[28222]: cluster 2026-04-16T19:35:06.124645+0000 mgr.vm01.nwhpas (mgr.14227) 798 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:07.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:07 vm01 bash[28222]: audit 2026-04-16T19:35:07.580293+0000 mon.vm01 (mon.0) 1243 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:07.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:07 vm01 bash[28222]: audit 2026-04-16T19:35:07.580293+0000 mon.vm01 (mon.0) 1243 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:09 vm04 bash[34817]: cluster 2026-04-16T19:35:08.125048+0000 mgr.vm01.nwhpas (mgr.14227) 799 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:09 vm04 bash[34817]: cluster 2026-04-16T19:35:08.125048+0000 mgr.vm01.nwhpas (mgr.14227) 799 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:09.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:09 vm01 bash[28222]: cluster 2026-04-16T19:35:08.125048+0000 mgr.vm01.nwhpas (mgr.14227) 799 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:09.963 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:09 vm01 bash[28222]: cluster 2026-04-16T19:35:08.125048+0000 mgr.vm01.nwhpas (mgr.14227) 799 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:11.266 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:35:11.447 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:35:11.447 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 5m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:35:11.447 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (5m) 5m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:35:11.447 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (51s) 29s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:35:11.447 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 29s ago 12m - - 2026-04-16T19:35:11.676 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:35:11.676 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:35:11.676 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:35:11.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:11 vm04 bash[34817]: cluster 2026-04-16T19:35:10.125503+0000 mgr.vm01.nwhpas (mgr.14227) 800 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:11.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:11 vm04 bash[34817]: cluster 2026-04-16T19:35:10.125503+0000 mgr.vm01.nwhpas (mgr.14227) 800 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:11.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:11 vm04 bash[34817]: audit 2026-04-16T19:35:11.671687+0000 mon.vm01 (mon.0) 1244 : audit [DBG] from='client.? 192.168.123.101:0/1720505445' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:11.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:11 vm04 bash[34817]: audit 2026-04-16T19:35:11.671687+0000 mon.vm01 (mon.0) 1244 : audit [DBG] from='client.? 192.168.123.101:0/1720505445' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:11.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:11 vm01 bash[28222]: cluster 2026-04-16T19:35:10.125503+0000 mgr.vm01.nwhpas (mgr.14227) 800 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:11.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:11 vm01 bash[28222]: cluster 2026-04-16T19:35:10.125503+0000 mgr.vm01.nwhpas (mgr.14227) 800 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:11.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:11 vm01 bash[28222]: audit 2026-04-16T19:35:11.671687+0000 mon.vm01 (mon.0) 1244 : audit [DBG] from='client.? 
192.168.123.101:0/1720505445' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:11.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:11 vm01 bash[28222]: audit 2026-04-16T19:35:11.671687+0000 mon.vm01 (mon.0) 1244 : audit [DBG] from='client.? 192.168.123.101:0/1720505445' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:12.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:12 vm04 bash[34817]: audit 2026-04-16T19:35:11.243924+0000 mgr.vm01.nwhpas (mgr.14227) 801 : audit [DBG] from='client.16292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:12 vm04 bash[34817]: audit 2026-04-16T19:35:11.243924+0000 mgr.vm01.nwhpas (mgr.14227) 801 : audit [DBG] from='client.16292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:12 vm04 bash[34817]: audit 2026-04-16T19:35:11.439285+0000 mgr.vm01.nwhpas (mgr.14227) 802 : audit [DBG] from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:12 vm04 bash[34817]: audit 2026-04-16T19:35:11.439285+0000 mgr.vm01.nwhpas (mgr.14227) 802 : audit [DBG] from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:12 vm01 bash[28222]: audit 2026-04-16T19:35:11.243924+0000 mgr.vm01.nwhpas (mgr.14227) 801 : audit [DBG] from='client.16292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:12 vm01 bash[28222]: audit 2026-04-16T19:35:11.243924+0000 mgr.vm01.nwhpas (mgr.14227) 801 : audit [DBG] from='client.16292 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:12 vm01 bash[28222]: audit 2026-04-16T19:35:11.439285+0000 mgr.vm01.nwhpas (mgr.14227) 802 : audit [DBG] from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:12 vm01 bash[28222]: audit 2026-04-16T19:35:11.439285+0000 mgr.vm01.nwhpas (mgr.14227) 802 : audit [DBG] from='client.16296 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:13 vm04 bash[34817]: cluster 2026-04-16T19:35:12.126007+0000 mgr.vm01.nwhpas (mgr.14227) 803 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:13 vm04 bash[34817]: cluster 2026-04-16T19:35:12.126007+0000 mgr.vm01.nwhpas (mgr.14227) 803 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:13.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:13 vm01 bash[28222]: cluster 2026-04-16T19:35:12.126007+0000 mgr.vm01.nwhpas 
(mgr.14227) 803 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:13.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:13 vm01 bash[28222]: cluster 2026-04-16T19:35:12.126007+0000 mgr.vm01.nwhpas (mgr.14227) 803 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:15.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:15 vm04 bash[34817]: cluster 2026-04-16T19:35:14.126629+0000 mgr.vm01.nwhpas (mgr.14227) 804 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:15.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:15 vm04 bash[34817]: cluster 2026-04-16T19:35:14.126629+0000 mgr.vm01.nwhpas (mgr.14227) 804 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:15.962 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:15 vm01 bash[28222]: cluster 2026-04-16T19:35:14.126629+0000 mgr.vm01.nwhpas (mgr.14227) 804 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:15.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:15 vm01 bash[28222]: cluster 2026-04-16T19:35:14.126629+0000 mgr.vm01.nwhpas (mgr.14227) 804 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:16.896 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:35:17.081 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:35:17.081 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 5m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:35:17.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 5m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:35:17.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (56s) 35s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:35:17.082 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 35s ago 12m - - 2026-04-16T19:35:17.361 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:35:17.361 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:35:17.361 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:35:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:17 vm04 bash[34817]: cluster 2026-04-16T19:35:16.127021+0000 mgr.vm01.nwhpas (mgr.14227) 805 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:17 vm04 bash[34817]: cluster 2026-04-16T19:35:16.127021+0000 mgr.vm01.nwhpas (mgr.14227) 805 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:17 vm04 bash[34817]: audit 2026-04-16T19:35:16.868817+0000 mgr.vm01.nwhpas (mgr.14227) 806 : audit [DBG] from='client.16304 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:17 vm04 bash[34817]: audit 2026-04-16T19:35:16.868817+0000 mgr.vm01.nwhpas (mgr.14227) 806 : audit [DBG] from='client.16304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:17 vm04 bash[34817]: audit 2026-04-16T19:35:17.356225+0000 mon.vm01 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.101:0/3080203274' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:17.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:17 vm04 bash[34817]: audit 2026-04-16T19:35:17.356225+0000 mon.vm01 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.101:0/3080203274' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:17 vm01 bash[28222]: cluster 2026-04-16T19:35:16.127021+0000 mgr.vm01.nwhpas (mgr.14227) 805 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:17 vm01 bash[28222]: cluster 2026-04-16T19:35:16.127021+0000 mgr.vm01.nwhpas (mgr.14227) 805 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:17 vm01 bash[28222]: audit 2026-04-16T19:35:16.868817+0000 mgr.vm01.nwhpas (mgr.14227) 806 : audit [DBG] from='client.16304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:17 vm01 bash[28222]: audit 2026-04-16T19:35:16.868817+0000 mgr.vm01.nwhpas (mgr.14227) 806 : audit [DBG] from='client.16304 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:17 vm01 bash[28222]: audit 2026-04-16T19:35:17.356225+0000 mon.vm01 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.101:0/3080203274' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:17.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:17 vm01 bash[28222]: audit 2026-04-16T19:35:17.356225+0000 mon.vm01 (mon.0) 1245 : audit [DBG] from='client.? 
192.168.123.101:0/3080203274' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:19.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:18 vm04 bash[34817]: audit 2026-04-16T19:35:17.073685+0000 mgr.vm01.nwhpas (mgr.14227) 807 : audit [DBG] from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:19.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:18 vm04 bash[34817]: audit 2026-04-16T19:35:17.073685+0000 mgr.vm01.nwhpas (mgr.14227) 807 : audit [DBG] from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:19.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:18 vm01 bash[28222]: audit 2026-04-16T19:35:17.073685+0000 mgr.vm01.nwhpas (mgr.14227) 807 : audit [DBG] from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:19.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:18 vm01 bash[28222]: audit 2026-04-16T19:35:17.073685+0000 mgr.vm01.nwhpas (mgr.14227) 807 : audit [DBG] from='client.16308 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:20.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:19 vm04 bash[34817]: cluster 2026-04-16T19:35:18.127397+0000 mgr.vm01.nwhpas (mgr.14227) 808 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:20.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:19 vm04 bash[34817]: cluster 2026-04-16T19:35:18.127397+0000 mgr.vm01.nwhpas (mgr.14227) 808 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:20.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:19 vm01 bash[28222]: cluster 2026-04-16T19:35:18.127397+0000 mgr.vm01.nwhpas (mgr.14227) 808 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:20.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:19 vm01 bash[28222]: cluster 2026-04-16T19:35:18.127397+0000 mgr.vm01.nwhpas (mgr.14227) 808 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:22.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:21 vm04 bash[34817]: cluster 2026-04-16T19:35:20.127785+0000 mgr.vm01.nwhpas (mgr.14227) 809 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:22.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:21 vm04 bash[34817]: cluster 2026-04-16T19:35:20.127785+0000 mgr.vm01.nwhpas (mgr.14227) 809 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:35:22.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:21 vm01 bash[28222]: cluster 2026-04-16T19:35:20.127785+0000 mgr.vm01.nwhpas (mgr.14227) 809 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 
2026-04-16T19:35:22.607 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:35:22.830 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:35:22.830 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:35:22.830 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:35:22.830 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (62s) 41s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:35:22.831 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 41s ago 12m - -
2026-04-16T19:35:23.115 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:35:23.115 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:35:23.115 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:35:23.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:22 vm04 bash[34817]: audit 2026-04-16T19:35:22.583564+0000 mon.vm01 (mon.0) 1246 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:35:23.212 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:22 vm01 bash[28222]: audit 2026-04-16T19:35:22.583564+0000 mon.vm01 (mon.0) 1246 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:35:24.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:23 vm04 bash[34817]: cluster 2026-04-16T19:35:22.128174+0000 mgr.vm01.nwhpas (mgr.14227) 810 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:35:24.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:23 vm04 bash[34817]: audit 2026-04-16T19:35:22.577322+0000 mgr.vm01.nwhpas (mgr.14227) 811 : audit [DBG] from='client.25491 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:24.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:23 vm04 bash[34817]: audit 2026-04-16T19:35:22.822763+0000 mgr.vm01.nwhpas (mgr.14227) 812 : audit [DBG] from='client.16320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:24.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:23 vm04 bash[34817]: audit 2026-04-16T19:35:23.110106+0000 mon.vm01 (mon.0) 1247 : audit [DBG] from='client.? 192.168.123.101:0/2784259516' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:24.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:23 vm01 bash[28222]: cluster 2026-04-16T19:35:22.128174+0000 mgr.vm01.nwhpas (mgr.14227) 810 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:35:24.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:23 vm01 bash[28222]: audit 2026-04-16T19:35:22.577322+0000 mgr.vm01.nwhpas (mgr.14227) 811 : audit [DBG] from='client.25491 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:24.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:23 vm01 bash[28222]: audit 2026-04-16T19:35:22.822763+0000 mgr.vm01.nwhpas (mgr.14227) 812 : audit [DBG] from='client.16320 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:24.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:23 vm01 bash[28222]: audit 2026-04-16T19:35:23.110106+0000 mon.vm01 (mon.0) 1247 : audit [DBG] from='client.? 192.168.123.101:0/2784259516' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:26.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:25 vm04 bash[34817]: cluster 2026-04-16T19:35:24.128660+0000 mgr.vm01.nwhpas (mgr.14227) 813 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:26.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:25 vm01 bash[28222]: cluster 2026-04-16T19:35:24.128660+0000 mgr.vm01.nwhpas (mgr.14227) 813 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:28.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:27 vm04 bash[34817]: cluster 2026-04-16T19:35:26.129135+0000 mgr.vm01.nwhpas (mgr.14227) 814 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:28.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:27 vm01 bash[28222]: cluster 2026-04-16T19:35:26.129135+0000 mgr.vm01.nwhpas (mgr.14227) 814 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:28.323 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:35:28.513 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:35:28.514 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:35:28.514 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:35:28.514 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (68s) 46s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:35:28.514 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 46s ago 12m - -
2026-04-16T19:35:28.750 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:35:28.750 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:35:28.750 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:35:29.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:28 vm04 bash[34817]: audit 2026-04-16T19:35:28.741514+0000 mon.vm01 (mon.0) 1248 : audit [DBG] from='client.? 192.168.123.101:0/1583713995' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:29.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:28 vm01 bash[28222]: audit 2026-04-16T19:35:28.741514+0000 mon.vm01 (mon.0) 1248 : audit [DBG] from='client.? 192.168.123.101:0/1583713995' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:30.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:29 vm04 bash[34817]: cluster 2026-04-16T19:35:28.129531+0000 mgr.vm01.nwhpas (mgr.14227) 815 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:35:30.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:29 vm04 bash[34817]: audit 2026-04-16T19:35:28.301026+0000 mgr.vm01.nwhpas (mgr.14227) 816 : audit [DBG] from='client.16328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:30.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:29 vm04 bash[34817]: audit 2026-04-16T19:35:28.505560+0000 mgr.vm01.nwhpas (mgr.14227) 817 : audit [DBG] from='client.16332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:30.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:29 vm01 bash[28222]: cluster 2026-04-16T19:35:28.129531+0000 mgr.vm01.nwhpas (mgr.14227) 815 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:35:30.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:29 vm01 bash[28222]: audit 2026-04-16T19:35:28.301026+0000 mgr.vm01.nwhpas (mgr.14227) 816 : audit [DBG] from='client.16328 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:30.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:29 vm01 bash[28222]: audit 2026-04-16T19:35:28.505560+0000 mgr.vm01.nwhpas (mgr.14227) 817 : audit [DBG] from='client.16332 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:32.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:31 vm04 bash[34817]: cluster 2026-04-16T19:35:30.130001+0000 mgr.vm01.nwhpas (mgr.14227) 818 : cluster [DBG] pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:32.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:31 vm01 bash[28222]: cluster 2026-04-16T19:35:30.130001+0000 mgr.vm01.nwhpas (mgr.14227) 818 : cluster [DBG] pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:33.960 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:35:34.155 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:35:34.155 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:35:34.155 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:35:34.155 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (74s) 52s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:35:34.155 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 52s ago 12m - -
2026-04-16T19:35:34.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:33 vm04 bash[34817]: cluster 2026-04-16T19:35:32.130523+0000 mgr.vm01.nwhpas (mgr.14227) 819 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:34.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:33 vm01 bash[28222]: cluster 2026-04-16T19:35:32.130523+0000 mgr.vm01.nwhpas (mgr.14227) 819 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:34.401 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:35:34.401 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:35:34.401 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:35:35.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:34 vm04 bash[34817]: audit 2026-04-16T19:35:33.936661+0000 mgr.vm01.nwhpas (mgr.14227) 820 : audit [DBG] from='client.16340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:35.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:34 vm04 bash[34817]: cluster 2026-04-16T19:35:34.130960+0000 mgr.vm01.nwhpas (mgr.14227) 821 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:35.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:34 vm04 bash[34817]: audit 2026-04-16T19:35:34.147278+0000 mgr.vm01.nwhpas (mgr.14227) 822 : audit [DBG] from='client.16344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:35.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:34 vm04 bash[34817]: audit 2026-04-16T19:35:34.396254+0000 mon.vm01 (mon.0) 1249 : audit [DBG] from='client.? 192.168.123.101:0/1399765913' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:35.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:34 vm01 bash[28222]: audit 2026-04-16T19:35:33.936661+0000 mgr.vm01.nwhpas (mgr.14227) 820 : audit [DBG] from='client.16340 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:35.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:34 vm01 bash[28222]: cluster 2026-04-16T19:35:34.130960+0000 mgr.vm01.nwhpas (mgr.14227) 821 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:35:35.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:34 vm01 bash[28222]: audit 2026-04-16T19:35:34.147278+0000 mgr.vm01.nwhpas (mgr.14227) 822 : audit [DBG] from='client.16344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:35.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:34 vm01 bash[28222]: audit 2026-04-16T19:35:34.396254+0000 mon.vm01 (mon.0) 1249 : audit [DBG] from='client.? 192.168.123.101:0/1399765913' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:37.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:37 vm04 bash[34817]: cluster 2026-04-16T19:35:36.131403+0000 mgr.vm01.nwhpas (mgr.14227) 823 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:35:37.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:37 vm01 bash[28222]: cluster 2026-04-16T19:35:36.131403+0000 mgr.vm01.nwhpas (mgr.14227) 823 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:35:38.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:38 vm04 bash[34817]: audit 2026-04-16T19:35:37.583775+0000 mon.vm01 (mon.0) 1250 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:35:38.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:38 vm01 bash[28222]: audit 2026-04-16T19:35:37.583775+0000 mon.vm01 (mon.0) 1250 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:35:39.624 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:35:39.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:39 vm04 bash[34817]: cluster 2026-04-16T19:35:38.131880+0000 mgr.vm01.nwhpas (mgr.14227) 824 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:35:39.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:39 vm01 bash[28222]: cluster 2026-04-16T19:35:38.131880+0000 mgr.vm01.nwhpas (mgr.14227) 824 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:35:39.822 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:35:39.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:35:39.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:35:39.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (79s) 58s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:35:39.822 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 58s ago 12m - -
2026-04-16T19:35:40.071 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:35:40.071 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:35:40.071 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:35:40.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:40 vm04 bash[34817]: audit 2026-04-16T19:35:39.602017+0000 mgr.vm01.nwhpas (mgr.14227) 825 : audit [DBG] from='client.16352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:40.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:40 vm04 bash[34817]: audit 2026-04-16T19:35:39.813609+0000 mgr.vm01.nwhpas (mgr.14227) 826 : audit [DBG] from='client.16356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:40.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:40 vm04 bash[34817]: audit 2026-04-16T19:35:40.066208+0000 mon.vm01 (mon.0) 1251 : audit [DBG] from='client.? 192.168.123.101:0/3104397927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:40.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:40 vm01 bash[28222]: audit 2026-04-16T19:35:39.602017+0000 mgr.vm01.nwhpas (mgr.14227) 825 : audit [DBG] from='client.16352 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:40.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:40 vm01 bash[28222]: audit 2026-04-16T19:35:39.813609+0000 mgr.vm01.nwhpas (mgr.14227) 826 : audit [DBG] from='client.16356 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:35:40.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:40 vm01 bash[28222]: audit 2026-04-16T19:35:40.066208+0000 mon.vm01 (mon.0) 1251 : audit [DBG] from='client.? 192.168.123.101:0/3104397927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:35:40.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:40 vm01 bash[28222]: audit 2026-04-16T19:35:40.066208+0000 mon.vm01 (mon.0) 1251 : audit [DBG] from='client.?
192.168.123.101:0/3104397927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:41.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:41 vm04 bash[34817]: cluster 2026-04-16T19:35:40.132365+0000 mgr.vm01.nwhpas (mgr.14227) 827 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:41.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:41 vm04 bash[34817]: cluster 2026-04-16T19:35:40.132365+0000 mgr.vm01.nwhpas (mgr.14227) 827 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:41.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:41 vm01 bash[28222]: cluster 2026-04-16T19:35:40.132365+0000 mgr.vm01.nwhpas (mgr.14227) 827 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:41.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:41 vm01 bash[28222]: cluster 2026-04-16T19:35:40.132365+0000 mgr.vm01.nwhpas (mgr.14227) 827 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:42.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:42 vm04 bash[34817]: audit 2026-04-16T19:35:42.140768+0000 mon.vm01 (mon.0) 1252 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:35:42.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:42 vm04 bash[34817]: audit 2026-04-16T19:35:42.140768+0000 mon.vm01 (mon.0) 1252 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:35:42.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:42 vm01 bash[28222]: audit 2026-04-16T19:35:42.140768+0000 mon.vm01 (mon.0) 1252 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:35:42.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:42 vm01 bash[28222]: audit 2026-04-16T19:35:42.140768+0000 mon.vm01 (mon.0) 1252 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:35:43.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: cluster 2026-04-16T19:35:42.132816+0000 mgr.vm01.nwhpas (mgr.14227) 828 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: cluster 2026-04-16T19:35:42.132816+0000 mgr.vm01.nwhpas (mgr.14227) 828 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.492738+0000 mon.vm01 (mon.0) 1253 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.492738+0000 mon.vm01 (mon.0) 1253 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.493291+0000 mon.vm01 (mon.0) 1254 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.493291+0000 mon.vm01 (mon.0) 1254 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: cluster 2026-04-16T19:35:42.494566+0000 mgr.vm01.nwhpas (mgr.14227) 829 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: cluster 2026-04-16T19:35:42.494566+0000 mgr.vm01.nwhpas (mgr.14227) 829 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: cluster 2026-04-16T19:35:42.495053+0000 mgr.vm01.nwhpas (mgr.14227) 830 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: cluster 2026-04-16T19:35:42.495053+0000 mgr.vm01.nwhpas (mgr.14227) 830 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.499037+0000 mon.vm01 (mon.0) 1255 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.499037+0000 mon.vm01 (mon.0) 1255 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.500834+0000 mon.vm01 (mon.0) 1256 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:35:43.712 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:43 vm04 bash[34817]: audit 2026-04-16T19:35:42.500834+0000 mon.vm01 (mon.0) 1256 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: cluster 2026-04-16T19:35:42.132816+0000 mgr.vm01.nwhpas (mgr.14227) 828 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: cluster 2026-04-16T19:35:42.132816+0000 mgr.vm01.nwhpas (mgr.14227) 828 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.713 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.492738+0000 mon.vm01 (mon.0) 1253 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.492738+0000 mon.vm01 (mon.0) 1253 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.493291+0000 mon.vm01 (mon.0) 1254 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.493291+0000 mon.vm01 (mon.0) 1254 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: cluster 2026-04-16T19:35:42.494566+0000 mgr.vm01.nwhpas (mgr.14227) 829 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: cluster 2026-04-16T19:35:42.494566+0000 mgr.vm01.nwhpas (mgr.14227) 829 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: cluster 2026-04-16T19:35:42.495053+0000 mgr.vm01.nwhpas (mgr.14227) 830 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: cluster 2026-04-16T19:35:42.495053+0000 mgr.vm01.nwhpas (mgr.14227) 830 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.499037+0000 mon.vm01 (mon.0) 1255 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.499037+0000 mon.vm01 (mon.0) 1255 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.500834+0000 mon.vm01 (mon.0) 1256 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:35:43.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:43 vm01 bash[28222]: audit 2026-04-16T19:35:42.500834+0000 mon.vm01 (mon.0) 1256 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:35:45.297 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for 
rgw.foo.vm04.uxumrv to stop 2026-04-16T19:35:45.484 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:35:45.484 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:35:45.484 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:35:45.484 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (85s) 63s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:35:45.484 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 63s ago 12m - - 2026-04-16T19:35:45.721 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:35:45.721 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:35:45.721 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:35:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:45 vm04 bash[34817]: cluster 2026-04-16T19:35:44.495490+0000 mgr.vm01.nwhpas (mgr.14227) 831 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:45 vm04 bash[34817]: cluster 2026-04-16T19:35:44.495490+0000 mgr.vm01.nwhpas (mgr.14227) 831 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:46.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:45 vm01 bash[28222]: cluster 2026-04-16T19:35:44.495490+0000 mgr.vm01.nwhpas (mgr.14227) 831 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:46.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:45 vm01 bash[28222]: cluster 2026-04-16T19:35:44.495490+0000 mgr.vm01.nwhpas (mgr.14227) 831 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:46.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:46 vm01 bash[28222]: audit 2026-04-16T19:35:45.272043+0000 mgr.vm01.nwhpas (mgr.14227) 832 : audit [DBG] from='client.16364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:46.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:46 vm01 bash[28222]: audit 2026-04-16T19:35:45.272043+0000 mgr.vm01.nwhpas (mgr.14227) 832 : audit [DBG] from='client.16364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:46.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:46 vm01 bash[28222]: audit 2026-04-16T19:35:45.475228+0000 mgr.vm01.nwhpas (mgr.14227) 833 : audit [DBG] from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:46.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:46 vm01 bash[28222]: audit 2026-04-16T19:35:45.475228+0000 mgr.vm01.nwhpas (mgr.14227) 833 : audit [DBG] from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:46.963 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:46 vm01 bash[28222]: audit 2026-04-16T19:35:45.715960+0000 mon.vm01 (mon.0) 1257 : audit [DBG] from='client.? 192.168.123.101:0/3187877752' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:46.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:46 vm01 bash[28222]: audit 2026-04-16T19:35:45.715960+0000 mon.vm01 (mon.0) 1257 : audit [DBG] from='client.? 192.168.123.101:0/3187877752' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:46 vm04 bash[34817]: audit 2026-04-16T19:35:45.272043+0000 mgr.vm01.nwhpas (mgr.14227) 832 : audit [DBG] from='client.16364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:46 vm04 bash[34817]: audit 2026-04-16T19:35:45.272043+0000 mgr.vm01.nwhpas (mgr.14227) 832 : audit [DBG] from='client.16364 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:46 vm04 bash[34817]: audit 2026-04-16T19:35:45.475228+0000 mgr.vm01.nwhpas (mgr.14227) 833 : audit [DBG] from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:46 vm04 bash[34817]: audit 2026-04-16T19:35:45.475228+0000 mgr.vm01.nwhpas (mgr.14227) 833 : audit [DBG] from='client.16368 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:46 vm04 bash[34817]: audit 2026-04-16T19:35:45.715960+0000 mon.vm01 (mon.0) 1257 : audit [DBG] from='client.? 192.168.123.101:0/3187877752' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:47.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:46 vm04 bash[34817]: audit 2026-04-16T19:35:45.715960+0000 mon.vm01 (mon.0) 1257 : audit [DBG] from='client.? 
192.168.123.101:0/3187877752' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:47 vm04 bash[34817]: cluster 2026-04-16T19:35:46.495863+0000 mgr.vm01.nwhpas (mgr.14227) 834 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:47 vm04 bash[34817]: cluster 2026-04-16T19:35:46.495863+0000 mgr.vm01.nwhpas (mgr.14227) 834 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:47 vm01 bash[28222]: cluster 2026-04-16T19:35:46.495863+0000 mgr.vm01.nwhpas (mgr.14227) 834 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:47 vm01 bash[28222]: cluster 2026-04-16T19:35:46.495863+0000 mgr.vm01.nwhpas (mgr.14227) 834 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:35:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:49 vm04 bash[34817]: cluster 2026-04-16T19:35:48.496256+0000 mgr.vm01.nwhpas (mgr.14227) 835 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:49 vm04 bash[34817]: cluster 2026-04-16T19:35:48.496256+0000 mgr.vm01.nwhpas (mgr.14227) 835 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:49 vm01 bash[28222]: cluster 2026-04-16T19:35:48.496256+0000 mgr.vm01.nwhpas (mgr.14227) 835 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:49 vm01 bash[28222]: cluster 2026-04-16T19:35:48.496256+0000 mgr.vm01.nwhpas (mgr.14227) 835 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:50.958 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:35:51.159 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:35:51.159 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (11m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:35:51.159 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:35:51.159 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (91s) 69s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:35:51.159 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 69s ago 12m - - 2026-04-16T19:35:51.414 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:35:51.414 
INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:35:51.414 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:35:52.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:51 vm04 bash[34817]: cluster 2026-04-16T19:35:50.496667+0000 mgr.vm01.nwhpas (mgr.14227) 836 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:52.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:51 vm04 bash[34817]: cluster 2026-04-16T19:35:50.496667+0000 mgr.vm01.nwhpas (mgr.14227) 836 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:52.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:51 vm04 bash[34817]: audit 2026-04-16T19:35:50.931835+0000 mgr.vm01.nwhpas (mgr.14227) 837 : audit [DBG] from='client.16376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:52.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:51 vm04 bash[34817]: audit 2026-04-16T19:35:50.931835+0000 mgr.vm01.nwhpas (mgr.14227) 837 : audit [DBG] from='client.16376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:52.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:51 vm04 bash[34817]: audit 2026-04-16T19:35:51.407180+0000 mon.vm01 (mon.0) 1258 : audit [DBG] from='client.? 192.168.123.101:0/2918699687' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:52.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:51 vm04 bash[34817]: audit 2026-04-16T19:35:51.407180+0000 mon.vm01 (mon.0) 1258 : audit [DBG] from='client.? 192.168.123.101:0/2918699687' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:52.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:51 vm01 bash[28222]: cluster 2026-04-16T19:35:50.496667+0000 mgr.vm01.nwhpas (mgr.14227) 836 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:52.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:51 vm01 bash[28222]: cluster 2026-04-16T19:35:50.496667+0000 mgr.vm01.nwhpas (mgr.14227) 836 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-16T19:35:52.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:51 vm01 bash[28222]: audit 2026-04-16T19:35:50.931835+0000 mgr.vm01.nwhpas (mgr.14227) 837 : audit [DBG] from='client.16376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:52.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:51 vm01 bash[28222]: audit 2026-04-16T19:35:50.931835+0000 mgr.vm01.nwhpas (mgr.14227) 837 : audit [DBG] from='client.16376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:52.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:51 vm01 bash[28222]: audit 2026-04-16T19:35:51.407180+0000 mon.vm01 (mon.0) 1258 : audit [DBG] from='client.? 
192.168.123.101:0/2918699687' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:52.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:51 vm01 bash[28222]: audit 2026-04-16T19:35:51.407180+0000 mon.vm01 (mon.0) 1258 : audit [DBG] from='client.? 192.168.123.101:0/2918699687' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:53.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:53 vm04 bash[34817]: audit 2026-04-16T19:35:51.148795+0000 mgr.vm01.nwhpas (mgr.14227) 838 : audit [DBG] from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:53.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:53 vm04 bash[34817]: audit 2026-04-16T19:35:51.148795+0000 mgr.vm01.nwhpas (mgr.14227) 838 : audit [DBG] from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:53.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:53 vm04 bash[34817]: cluster 2026-04-16T19:35:52.497074+0000 mgr.vm01.nwhpas (mgr.14227) 839 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:35:53.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:53 vm04 bash[34817]: cluster 2026-04-16T19:35:52.497074+0000 mgr.vm01.nwhpas (mgr.14227) 839 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:35:53.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:53 vm04 bash[34817]: audit 2026-04-16T19:35:52.584586+0000 mon.vm01 (mon.0) 1259 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:53.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:53 vm04 bash[34817]: audit 2026-04-16T19:35:52.584586+0000 mon.vm01 (mon.0) 1259 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:53.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:53 vm01 bash[28222]: audit 2026-04-16T19:35:51.148795+0000 mgr.vm01.nwhpas (mgr.14227) 838 : audit [DBG] from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:53.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:53 vm01 bash[28222]: audit 2026-04-16T19:35:51.148795+0000 mgr.vm01.nwhpas (mgr.14227) 838 : audit [DBG] from='client.16380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:53.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:53 vm01 bash[28222]: cluster 2026-04-16T19:35:52.497074+0000 mgr.vm01.nwhpas (mgr.14227) 839 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:35:53.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:53 vm01 bash[28222]: cluster 2026-04-16T19:35:52.497074+0000 mgr.vm01.nwhpas (mgr.14227) 839 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s 2026-04-16T19:35:53.463 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:53 vm01 bash[28222]: audit 2026-04-16T19:35:52.584586+0000 mon.vm01 (mon.0) 1259 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:53.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:53 vm01 bash[28222]: audit 2026-04-16T19:35:52.584586+0000 mon.vm01 (mon.0) 1259 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:35:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:55 vm04 bash[34817]: cluster 2026-04-16T19:35:54.497553+0000 mgr.vm01.nwhpas (mgr.14227) 840 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:55 vm04 bash[34817]: cluster 2026-04-16T19:35:54.497553+0000 mgr.vm01.nwhpas (mgr.14227) 840 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:55 vm01 bash[28222]: cluster 2026-04-16T19:35:54.497553+0000 mgr.vm01.nwhpas (mgr.14227) 840 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:55 vm01 bash[28222]: cluster 2026-04-16T19:35:54.497553+0000 mgr.vm01.nwhpas (mgr.14227) 840 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:56.632 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:35:56.836 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:35:56.837 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:35:56.837 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:35:56.837 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (96s) 75s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:35:56.837 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 75s ago 12m - - 2026-04-16T19:35:57.093 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:35:57.093 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:35:57.093 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: cluster 2026-04-16T19:35:56.497926+0000 mgr.vm01.nwhpas (mgr.14227) 841 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: cluster 2026-04-16T19:35:56.497926+0000 
mgr.vm01.nwhpas (mgr.14227) 841 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: audit 2026-04-16T19:35:56.606362+0000 mgr.vm01.nwhpas (mgr.14227) 842 : audit [DBG] from='client.16388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: audit 2026-04-16T19:35:56.606362+0000 mgr.vm01.nwhpas (mgr.14227) 842 : audit [DBG] from='client.16388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: audit 2026-04-16T19:35:56.820583+0000 mgr.vm01.nwhpas (mgr.14227) 843 : audit [DBG] from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: audit 2026-04-16T19:35:56.820583+0000 mgr.vm01.nwhpas (mgr.14227) 843 : audit [DBG] from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: audit 2026-04-16T19:35:57.086963+0000 mon.vm04 (mon.1) 39 : audit [DBG] from='client.? 192.168.123.101:0/3839855439' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:57.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:57 vm04 bash[34817]: audit 2026-04-16T19:35:57.086963+0000 mon.vm04 (mon.1) 39 : audit [DBG] from='client.? 
192.168.123.101:0/3839855439' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: cluster 2026-04-16T19:35:56.497926+0000 mgr.vm01.nwhpas (mgr.14227) 841 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: cluster 2026-04-16T19:35:56.497926+0000 mgr.vm01.nwhpas (mgr.14227) 841 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: audit 2026-04-16T19:35:56.606362+0000 mgr.vm01.nwhpas (mgr.14227) 842 : audit [DBG] from='client.16388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: audit 2026-04-16T19:35:56.606362+0000 mgr.vm01.nwhpas (mgr.14227) 842 : audit [DBG] from='client.16388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: audit 2026-04-16T19:35:56.820583+0000 mgr.vm01.nwhpas (mgr.14227) 843 : audit [DBG] from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: audit 2026-04-16T19:35:56.820583+0000 mgr.vm01.nwhpas (mgr.14227) 843 : audit [DBG] from='client.16392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: audit 2026-04-16T19:35:57.086963+0000 mon.vm04 (mon.1) 39 : audit [DBG] from='client.? 192.168.123.101:0/3839855439' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:57.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:57 vm01 bash[28222]: audit 2026-04-16T19:35:57.086963+0000 mon.vm04 (mon.1) 39 : audit [DBG] from='client.? 
192.168.123.101:0/3839855439' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:35:59.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:59 vm01 bash[28222]: cluster 2026-04-16T19:35:58.498254+0000 mgr.vm01.nwhpas (mgr.14227) 844 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:35:59.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:35:59 vm01 bash[28222]: cluster 2026-04-16T19:35:58.498254+0000 mgr.vm01.nwhpas (mgr.14227) 844 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:36:00.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:59 vm04 bash[34817]: cluster 2026-04-16T19:35:58.498254+0000 mgr.vm01.nwhpas (mgr.14227) 844 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:36:00.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:35:59 vm04 bash[34817]: cluster 2026-04-16T19:35:58.498254+0000 mgr.vm01.nwhpas (mgr.14227) 844 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:36:02.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:01 vm04 bash[34817]: cluster 2026-04-16T19:36:00.498645+0000 mgr.vm01.nwhpas (mgr.14227) 845 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:02.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:01 vm04 bash[34817]: cluster 2026-04-16T19:36:00.498645+0000 mgr.vm01.nwhpas (mgr.14227) 845 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:02.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:01 vm01 bash[28222]: cluster 2026-04-16T19:36:00.498645+0000 mgr.vm01.nwhpas (mgr.14227) 845 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:02.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:01 vm01 bash[28222]: cluster 2026-04-16T19:36:00.498645+0000 mgr.vm01.nwhpas (mgr.14227) 845 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:02.311 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:36:02.517 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:36:02.517 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 6m ago 12m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:36:02.517 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 12m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:36:02.517 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (102s) 80s ago 12m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:36:02.517 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 80s ago 13m - - 2026-04-16T19:36:02.755 
INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:36:02.755 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:36:02.755 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: audit 2026-04-16T19:36:02.287498+0000 mgr.vm01.nwhpas (mgr.14227) 846 : audit [DBG] from='client.16400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: audit 2026-04-16T19:36:02.287498+0000 mgr.vm01.nwhpas (mgr.14227) 846 : audit [DBG] from='client.16400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: cluster 2026-04-16T19:36:02.499006+0000 mgr.vm01.nwhpas (mgr.14227) 847 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: cluster 2026-04-16T19:36:02.499006+0000 mgr.vm01.nwhpas (mgr.14227) 847 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: audit 2026-04-16T19:36:02.509675+0000 mgr.vm01.nwhpas (mgr.14227) 848 : audit [DBG] from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: audit 2026-04-16T19:36:02.509675+0000 mgr.vm01.nwhpas (mgr.14227) 848 : audit [DBG] from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: audit 2026-04-16T19:36:02.749706+0000 mon.vm01 (mon.0) 1260 : audit [DBG] from='client.? 192.168.123.101:0/1197634121' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:03 vm04 bash[34817]: audit 2026-04-16T19:36:02.749706+0000 mon.vm01 (mon.0) 1260 : audit [DBG] from='client.? 
192.168.123.101:0/1197634121' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: audit 2026-04-16T19:36:02.287498+0000 mgr.vm01.nwhpas (mgr.14227) 846 : audit [DBG] from='client.16400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: audit 2026-04-16T19:36:02.287498+0000 mgr.vm01.nwhpas (mgr.14227) 846 : audit [DBG] from='client.16400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: cluster 2026-04-16T19:36:02.499006+0000 mgr.vm01.nwhpas (mgr.14227) 847 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: cluster 2026-04-16T19:36:02.499006+0000 mgr.vm01.nwhpas (mgr.14227) 847 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: audit 2026-04-16T19:36:02.509675+0000 mgr.vm01.nwhpas (mgr.14227) 848 : audit [DBG] from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: audit 2026-04-16T19:36:02.509675+0000 mgr.vm01.nwhpas (mgr.14227) 848 : audit [DBG] from='client.16404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: audit 2026-04-16T19:36:02.749706+0000 mon.vm01 (mon.0) 1260 : audit [DBG] from='client.? 192.168.123.101:0/1197634121' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:03 vm01 bash[28222]: audit 2026-04-16T19:36:02.749706+0000 mon.vm01 (mon.0) 1260 : audit [DBG] from='client.? 
192.168.123.101:0/1197634121' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:06.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:05 vm04 bash[34817]: cluster 2026-04-16T19:36:04.499414+0000 mgr.vm01.nwhpas (mgr.14227) 849 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:06.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:05 vm04 bash[34817]: cluster 2026-04-16T19:36:04.499414+0000 mgr.vm01.nwhpas (mgr.14227) 849 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:06.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:05 vm01 bash[28222]: cluster 2026-04-16T19:36:04.499414+0000 mgr.vm01.nwhpas (mgr.14227) 849 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:06.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:05 vm01 bash[28222]: cluster 2026-04-16T19:36:04.499414+0000 mgr.vm01.nwhpas (mgr.14227) 849 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:07.968 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:36:08.161 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:36:08.161 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 6m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:36:08.161 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (6m) 6m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:36:08.161 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (108s) 86s ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:36:08.161 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 86s ago 13m - - 2026-04-16T19:36:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:07 vm04 bash[34817]: cluster 2026-04-16T19:36:06.499872+0000 mgr.vm01.nwhpas (mgr.14227) 850 : cluster [DBG] pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:36:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:07 vm04 bash[34817]: cluster 2026-04-16T19:36:06.499872+0000 mgr.vm01.nwhpas (mgr.14227) 850 : cluster [DBG] pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:36:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:07 vm04 bash[34817]: audit 2026-04-16T19:36:07.584796+0000 mon.vm01 (mon.0) 1261 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:36:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:07 vm04 bash[34817]: audit 2026-04-16T19:36:07.584796+0000 mon.vm01 (mon.0) 1261 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:36:08.213 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:07 vm01 bash[28222]: cluster 2026-04-16T19:36:06.499872+0000 mgr.vm01.nwhpas (mgr.14227) 850 : cluster [DBG] pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:08.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:07 vm01 bash[28222]: audit 2026-04-16T19:36:07.584796+0000 mon.vm01 (mon.0) 1261 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:36:08.397 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:08.397 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:08.397 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:09.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:08 vm04 bash[34817]: audit 2026-04-16T19:36:08.391667+0000 mon.vm01 (mon.0) 1262 : audit [DBG] from='client.? 192.168.123.101:0/2129724236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:09.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:08 vm01 bash[28222]: audit 2026-04-16T19:36:08.391667+0000 mon.vm01 (mon.0) 1262 : audit [DBG] from='client.? 192.168.123.101:0/2129724236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:10.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:09 vm04 bash[34817]: audit 2026-04-16T19:36:07.946774+0000 mgr.vm01.nwhpas (mgr.14227) 851 : audit [DBG] from='client.16412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:10.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:09 vm04 bash[34817]: audit 2026-04-16T19:36:08.152709+0000 mgr.vm01.nwhpas (mgr.14227) 852 : audit [DBG] from='client.16416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:10.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:09 vm04 bash[34817]: cluster 2026-04-16T19:36:08.500221+0000 mgr.vm01.nwhpas (mgr.14227) 853 : cluster [DBG] pgmap v472: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:10.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:09 vm01 bash[28222]: audit 2026-04-16T19:36:07.946774+0000 mgr.vm01.nwhpas (mgr.14227) 851 : audit [DBG] from='client.16412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:10.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:09 vm01 bash[28222]: audit 2026-04-16T19:36:08.152709+0000 mgr.vm01.nwhpas (mgr.14227) 852 : audit [DBG] from='client.16416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:10.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:09 vm01 bash[28222]: cluster 2026-04-16T19:36:08.500221+0000 mgr.vm01.nwhpas (mgr.14227) 853 : cluster [DBG] pgmap v472: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:11 vm04 bash[34817]: cluster 2026-04-16T19:36:10.500634+0000 mgr.vm01.nwhpas (mgr.14227) 854 : cluster [DBG] pgmap v473: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:11 vm01 bash[28222]: cluster 2026-04-16T19:36:10.500634+0000 mgr.vm01.nwhpas (mgr.14227) 854 : cluster [DBG] pgmap v473: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:13.603 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:13.794 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:13.794 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 6m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:13.795 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 6m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:13.795 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (113s) 92s ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:13.795 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 92s ago 13m - -
2026-04-16T19:36:14.024 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:14.024 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:14.024 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:13 vm04 bash[34817]: cluster 2026-04-16T19:36:12.501075+0000 mgr.vm01.nwhpas (mgr.14227) 855 : cluster [DBG] pgmap v474: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:14.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:13 vm01 bash[28222]: cluster 2026-04-16T19:36:12.501075+0000 mgr.vm01.nwhpas (mgr.14227) 855 : cluster [DBG] pgmap v474: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:15.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:14 vm04 bash[34817]: audit 2026-04-16T19:36:13.581691+0000 mgr.vm01.nwhpas (mgr.14227) 856 : audit [DBG] from='client.16424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:15.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:14 vm04 bash[34817]: audit 2026-04-16T19:36:13.786176+0000 mgr.vm01.nwhpas (mgr.14227) 857 : audit [DBG] from='client.16428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:15.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:14 vm04 bash[34817]: audit 2026-04-16T19:36:14.018333+0000 mon.vm04 (mon.1) 40 : audit [DBG] from='client.? 192.168.123.101:0/3721834040' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:15.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:14 vm01 bash[28222]: audit 2026-04-16T19:36:13.581691+0000 mgr.vm01.nwhpas (mgr.14227) 856 : audit [DBG] from='client.16424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:15.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:14 vm01 bash[28222]: audit 2026-04-16T19:36:13.786176+0000 mgr.vm01.nwhpas (mgr.14227) 857 : audit [DBG] from='client.16428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:15.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:14 vm01 bash[28222]: audit 2026-04-16T19:36:14.018333+0000 mon.vm04 (mon.1) 40 : audit [DBG] from='client.? 192.168.123.101:0/3721834040' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:16.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:15 vm04 bash[34817]: cluster 2026-04-16T19:36:14.501491+0000 mgr.vm01.nwhpas (mgr.14227) 858 : cluster [DBG] pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:16.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:15 vm01 bash[28222]: cluster 2026-04-16T19:36:14.501491+0000 mgr.vm01.nwhpas (mgr.14227) 858 : cluster [DBG] pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:18.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:17 vm04 bash[34817]: cluster 2026-04-16T19:36:16.501872+0000 mgr.vm01.nwhpas (mgr.14227) 859 : cluster [DBG] pgmap v476: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:18.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:17 vm01 bash[28222]: cluster 2026-04-16T19:36:16.501872+0000 mgr.vm01.nwhpas (mgr.14227) 859 : cluster [DBG] pgmap v476: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:19.234 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:19.422 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:19.422 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:19.422 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:19.423 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (119s) 97s ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:19.423 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 97s ago 13m - -
2026-04-16T19:36:19.645 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:19.645 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:19.645 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:20.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:19 vm04 bash[34817]: cluster 2026-04-16T19:36:18.502225+0000 mgr.vm01.nwhpas (mgr.14227) 860 : cluster [DBG] pgmap v477: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:20.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:19 vm04 bash[34817]: audit 2026-04-16T19:36:19.639915+0000 mon.vm01 (mon.0) 1263 : audit [DBG] from='client.? 192.168.123.101:0/3361053821' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:20.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:19 vm01 bash[28222]: cluster 2026-04-16T19:36:18.502225+0000 mgr.vm01.nwhpas (mgr.14227) 860 : cluster [DBG] pgmap v477: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:20.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:19 vm01 bash[28222]: audit 2026-04-16T19:36:19.639915+0000 mon.vm01 (mon.0) 1263 : audit [DBG] from='client.? 192.168.123.101:0/3361053821' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:20 vm04 bash[34817]: audit 2026-04-16T19:36:19.210511+0000 mgr.vm01.nwhpas (mgr.14227) 861 : audit [DBG] from='client.16436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:21.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:20 vm04 bash[34817]: audit 2026-04-16T19:36:19.414607+0000 mgr.vm01.nwhpas (mgr.14227) 862 : audit [DBG] from='client.16440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:20 vm01 bash[28222]: audit 2026-04-16T19:36:19.210511+0000 mgr.vm01.nwhpas (mgr.14227) 861 : audit [DBG] from='client.16436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:21.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:20 vm01 bash[28222]: audit 2026-04-16T19:36:19.414607+0000 mgr.vm01.nwhpas (mgr.14227) 862 : audit [DBG] from='client.16440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:22.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:21 vm04 bash[34817]: cluster 2026-04-16T19:36:20.502667+0000 mgr.vm01.nwhpas (mgr.14227) 863 : cluster [DBG] pgmap v478: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:22.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:21 vm01 bash[28222]: cluster 2026-04-16T19:36:20.502667+0000 mgr.vm01.nwhpas (mgr.14227) 863 : cluster [DBG] pgmap v478: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:23.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:22 vm04 bash[34817]: audit 2026-04-16T19:36:22.584936+0000 mon.vm01 (mon.0) 1264 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:36:23.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:22 vm01 bash[28222]: audit 2026-04-16T19:36:22.584936+0000 mon.vm01 (mon.0) 1264 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:36:24.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:23 vm04 bash[34817]: cluster 2026-04-16T19:36:22.503105+0000 mgr.vm01.nwhpas (mgr.14227) 864 : cluster [DBG] pgmap v479: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:24.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:23 vm01 bash[28222]: cluster 2026-04-16T19:36:22.503105+0000 mgr.vm01.nwhpas (mgr.14227) 864 : cluster [DBG] pgmap v479: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:24.848 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:25.040 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:25.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:25.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:25.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 103s ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:25.041 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 103s ago 13m - -
2026-04-16T19:36:25.279 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:25.279 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:25.279 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:26.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:25 vm04 bash[34817]: cluster 2026-04-16T19:36:24.503594+0000 mgr.vm01.nwhpas (mgr.14227) 865 : cluster [DBG] pgmap v480: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:26.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:25 vm04 bash[34817]: audit 2026-04-16T19:36:24.824721+0000 mgr.vm01.nwhpas (mgr.14227) 866 : audit [DBG] from='client.16448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:26.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:25 vm04 bash[34817]: audit 2026-04-16T19:36:25.273556+0000 mon.vm01 (mon.0) 1265 : audit [DBG] from='client.? 192.168.123.101:0/840097085' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:26.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:25 vm01 bash[28222]: cluster 2026-04-16T19:36:24.503594+0000 mgr.vm01.nwhpas (mgr.14227) 865 : cluster [DBG] pgmap v480: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:26.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:25 vm01 bash[28222]: audit 2026-04-16T19:36:24.824721+0000 mgr.vm01.nwhpas (mgr.14227) 866 : audit [DBG] from='client.16448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:26.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:25 vm01 bash[28222]: audit 2026-04-16T19:36:25.273556+0000 mon.vm01 (mon.0) 1265 : audit [DBG] from='client.? 192.168.123.101:0/840097085' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:26 vm04 bash[34817]: audit 2026-04-16T19:36:25.031586+0000 mgr.vm01.nwhpas (mgr.14227) 867 : audit [DBG] from='client.16452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:27.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:26 vm01 bash[28222]: audit 2026-04-16T19:36:25.031586+0000 mgr.vm01.nwhpas (mgr.14227) 867 : audit [DBG] from='client.16452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:28.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:27 vm04 bash[34817]: cluster 2026-04-16T19:36:26.504025+0000 mgr.vm01.nwhpas (mgr.14227) 868 : cluster [DBG] pgmap v481: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:28.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:27 vm01 bash[28222]: cluster 2026-04-16T19:36:26.504025+0000 mgr.vm01.nwhpas (mgr.14227) 868 : cluster [DBG] pgmap v481: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:30.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:29 vm04 bash[34817]: cluster 2026-04-16T19:36:28.504427+0000 mgr.vm01.nwhpas (mgr.14227) 869 : cluster [DBG] pgmap v482: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:36:30.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:29 vm01 bash[28222]: cluster 2026-04-16T19:36:28.504427+0000 mgr.vm01.nwhpas (mgr.14227) 869 : cluster [DBG] pgmap v482: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:36:30.504 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:30.694 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:30.694 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:30.694 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:30.694 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 108s ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:30.695 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 108s ago 13m - -
2026-04-16T19:36:30.932 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:30.932 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:30.933 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:32.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:31 vm04 bash[34817]: audit 2026-04-16T19:36:30.481308+0000 mgr.vm01.nwhpas (mgr.14227) 870 : audit [DBG] from='client.16460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:32.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:31 vm04 bash[34817]: cluster 2026-04-16T19:36:30.504860+0000 mgr.vm01.nwhpas (mgr.14227) 871 : cluster [DBG] pgmap v483: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:32.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:31 vm04 bash[34817]: audit 2026-04-16T19:36:30.686401+0000 mgr.vm01.nwhpas (mgr.14227) 872 : audit [DBG] from='client.16464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:32.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:31 vm04 bash[34817]: audit 2026-04-16T19:36:30.927091+0000 mon.vm01 (mon.0) 1266 : audit [DBG] from='client.? 192.168.123.101:0/2552129289' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:32.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:31 vm01 bash[28222]: audit 2026-04-16T19:36:30.481308+0000 mgr.vm01.nwhpas (mgr.14227) 870 : audit [DBG] from='client.16460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:32.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:31 vm01 bash[28222]: cluster 2026-04-16T19:36:30.504860+0000 mgr.vm01.nwhpas (mgr.14227) 871 : cluster [DBG] pgmap v483: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:32.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:31 vm01 bash[28222]: audit 2026-04-16T19:36:30.686401+0000 mgr.vm01.nwhpas (mgr.14227) 872 : audit [DBG] from='client.16464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:32.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:31 vm01 bash[28222]: audit 2026-04-16T19:36:30.927091+0000 mon.vm01 (mon.0) 1266 : audit [DBG] from='client.? 192.168.123.101:0/2552129289' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:34.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:33 vm04 bash[34817]: cluster 2026-04-16T19:36:32.505362+0000 mgr.vm01.nwhpas (mgr.14227) 873 : cluster [DBG] pgmap v484: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:34.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:33 vm01 bash[28222]: cluster 2026-04-16T19:36:32.505362+0000 mgr.vm01.nwhpas (mgr.14227) 873 : cluster [DBG] pgmap v484: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:36.151 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:35 vm04 bash[34817]: cluster 2026-04-16T19:36:34.506011+0000 mgr.vm01.nwhpas (mgr.14227) 874 : cluster [DBG] pgmap v485: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:36.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:35 vm01 bash[28222]: cluster 2026-04-16T19:36:34.506011+0000 mgr.vm01.nwhpas (mgr.14227) 874 : cluster [DBG] pgmap v485: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:36:36.337 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:36.337 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:36.337 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:36.337 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 114s ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:36.338 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 114s ago 13m - -
2026-04-16T19:36:36.565 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:36.565 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:36.565 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:36 vm04 bash[34817]: audit 2026-04-16T19:36:36.559671+0000 mon.vm01 (mon.0) 1267 : audit [DBG] from='client.? 192.168.123.101:0/4167369508' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:37.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:36 vm01 bash[28222]: audit 2026-04-16T19:36:36.559671+0000 mon.vm01 (mon.0) 1267 : audit [DBG] from='client.? 192.168.123.101:0/4167369508' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:37 vm01 bash[28222]: audit 2026-04-16T19:36:36.128770+0000 mgr.vm01.nwhpas (mgr.14227) 875 : audit [DBG] from='client.16472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:37 vm01 bash[28222]: audit 2026-04-16T19:36:36.329693+0000 mgr.vm01.nwhpas (mgr.14227) 876 : audit [DBG] from='client.16476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:38.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:37 vm01 bash[28222]: cluster 2026-04-16T19:36:36.506488+0000 mgr.vm01.nwhpas (mgr.14227) 877 : cluster [DBG] pgmap v486: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:38.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:37 vm01 bash[28222]: audit 2026-04-16T19:36:37.585018+0000 mon.vm01 (mon.0) 1268 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:36:38.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:37 vm04 bash[34817]: audit 2026-04-16T19:36:36.128770+0000 mgr.vm01.nwhpas (mgr.14227) 875 : audit [DBG] from='client.16472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:38.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:37 vm04 bash[34817]: audit 2026-04-16T19:36:36.329693+0000 mgr.vm01.nwhpas (mgr.14227) 876 : audit [DBG] from='client.16476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:38.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:37 vm04 bash[34817]: cluster 2026-04-16T19:36:36.506488+0000 mgr.vm01.nwhpas (mgr.14227) 877 : cluster [DBG] pgmap v486: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:38.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:37 vm04 bash[34817]: audit 2026-04-16T19:36:37.585018+0000 mon.vm01 (mon.0) 1268 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:36:39.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:38 vm04 bash[34817]: cluster 2026-04-16T19:36:38.506830+0000 mgr.vm01.nwhpas (mgr.14227) 878 : cluster [DBG] pgmap v487: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:39.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:38 vm01 bash[28222]: cluster 2026-04-16T19:36:38.506830+0000 mgr.vm01.nwhpas (mgr.14227) 878 : cluster [DBG] pgmap v487: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:36:41.780 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:41.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:41 vm04 bash[34817]: cluster 2026-04-16T19:36:40.507220+0000 mgr.vm01.nwhpas (mgr.14227) 879 : cluster [DBG] pgmap v488: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:41.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:41 vm01 bash[28222]: cluster 2026-04-16T19:36:40.507220+0000 mgr.vm01.nwhpas (mgr.14227) 879 : cluster [DBG] pgmap v488: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:41.983 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:41.983 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:41.983 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:41.983 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:41.983 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 13m - -
2026-04-16T19:36:42.226 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:42.226 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:42.226 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:42.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:42 vm04 bash[34817]: audit 2026-04-16T19:36:41.755585+0000 mgr.vm01.nwhpas (mgr.14227) 880 : audit [DBG] from='client.16484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:42.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:42 vm04 bash[34817]: audit 2026-04-16T19:36:42.220643+0000 mon.vm01 (mon.0) 1269 : audit [DBG] from='client.? 192.168.123.101:0/2587983099' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:42.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:42 vm04 bash[34817]: audit 2026-04-16T19:36:42.519341+0000 mon.vm01 (mon.0) 1270 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:36:42.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:42 vm01 bash[28222]: audit 2026-04-16T19:36:41.755585+0000 mgr.vm01.nwhpas (mgr.14227) 880 : audit [DBG] from='client.16484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:42.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:42 vm01 bash[28222]: audit 2026-04-16T19:36:42.220643+0000 mon.vm01 (mon.0) 1269 : audit [DBG] from='client.? 192.168.123.101:0/2587983099' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:36:42.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:42 vm01 bash[28222]: audit 2026-04-16T19:36:42.519341+0000 mon.vm01 (mon.0) 1270 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:36:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: audit 2026-04-16T19:36:41.975009+0000 mgr.vm01.nwhpas (mgr.14227) 881 : audit [DBG] from='client.16488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: cluster 2026-04-16T19:36:42.507710+0000 mgr.vm01.nwhpas (mgr.14227) 882 : cluster [DBG] pgmap v489: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: audit 2026-04-16T19:36:42.911921+0000 mon.vm01 (mon.0) 1271 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: audit 2026-04-16T19:36:42.912559+0000 mon.vm01 (mon.0) 1272 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: cluster 2026-04-16T19:36:42.913571+0000 mgr.vm01.nwhpas (mgr.14227) 883 : cluster [DBG] pgmap v490: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: cluster 2026-04-16T19:36:42.913990+0000 mgr.vm01.nwhpas (mgr.14227) 884 : cluster [DBG] pgmap v491: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: audit 2026-04-16T19:36:42.918018+0000 mon.vm01 (mon.0) 1273 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:36:43.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:43 vm04 bash[34817]: audit 2026-04-16T19:36:42.919528+0000 mon.vm01 (mon.0) 1274 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:36:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: audit 2026-04-16T19:36:41.975009+0000 mgr.vm01.nwhpas (mgr.14227) 881 : audit [DBG] from='client.16488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: cluster 2026-04-16T19:36:42.507710+0000 mgr.vm01.nwhpas (mgr.14227) 882 : cluster [DBG] pgmap v489: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: audit 2026-04-16T19:36:42.911921+0000 mon.vm01 (mon.0) 1271 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:36:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: audit 2026-04-16T19:36:42.912559+0000 mon.vm01 (mon.0) 1272 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:36:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: cluster 2026-04-16T19:36:42.913571+0000 mgr.vm01.nwhpas (mgr.14227) 883 : cluster [DBG] pgmap v490: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: cluster 2026-04-16T19:36:42.913990+0000 mgr.vm01.nwhpas (mgr.14227) 884 : cluster [DBG] pgmap v491: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: audit 2026-04-16T19:36:42.918018+0000 mon.vm01 (mon.0) 1273 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:36:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:43 vm01 bash[28222]: audit 2026-04-16T19:36:42.919528+0000 mon.vm01 (mon.0) 1274 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:36:45.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:44 vm04 bash[34817]: cluster 2026-04-16T19:36:44.914483+0000 mgr.vm01.nwhpas (mgr.14227) 885 : cluster [DBG] pgmap v492: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:44 vm01 bash[28222]: cluster 2026-04-16T19:36:44.914483+0000 mgr.vm01.nwhpas (mgr.14227) 885 : cluster [DBG] pgmap v492: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:47.447 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:36:47.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:46 vm04 bash[34817]: cluster 2026-04-16T19:36:46.914879+0000 mgr.vm01.nwhpas (mgr.14227) 886 : cluster [DBG] pgmap v493: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:47.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:46 vm01 bash[28222]: cluster 2026-04-16T19:36:46.914879+0000 mgr.vm01.nwhpas (mgr.14227) 886 : cluster [DBG] pgmap v493: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:36:47.632 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:36:47.632 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:36:47.632 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:36:47.632 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:36:47.632 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 13m - -
2026-04-16T19:36:47.897 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:36:47.898 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:36:47.898 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:36:48.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:47 vm04 bash[34817]: audit 2026-04-16T19:36:47.425412+0000 mgr.vm01.nwhpas (mgr.14227) 887 : audit [DBG] from='client.16496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:36:48.461
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:47 vm04 bash[34817]: audit 2026-04-16T19:36:47.622607+0000 mgr.vm01.nwhpas (mgr.14227) 888 : audit [DBG] from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:48.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:47 vm04 bash[34817]: audit 2026-04-16T19:36:47.622607+0000 mgr.vm01.nwhpas (mgr.14227) 888 : audit [DBG] from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:48.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:47 vm04 bash[34817]: audit 2026-04-16T19:36:47.892097+0000 mon.vm01 (mon.0) 1275 : audit [DBG] from='client.? 192.168.123.101:0/2422909550' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:48.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:47 vm04 bash[34817]: audit 2026-04-16T19:36:47.892097+0000 mon.vm01 (mon.0) 1275 : audit [DBG] from='client.? 192.168.123.101:0/2422909550' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:48.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:47 vm01 bash[28222]: audit 2026-04-16T19:36:47.425412+0000 mgr.vm01.nwhpas (mgr.14227) 887 : audit [DBG] from='client.16496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:48.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:47 vm01 bash[28222]: audit 2026-04-16T19:36:47.425412+0000 mgr.vm01.nwhpas (mgr.14227) 887 : audit [DBG] from='client.16496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:48.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:47 vm01 bash[28222]: audit 2026-04-16T19:36:47.622607+0000 mgr.vm01.nwhpas (mgr.14227) 888 : audit [DBG] from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:48.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:47 vm01 bash[28222]: audit 2026-04-16T19:36:47.622607+0000 mgr.vm01.nwhpas (mgr.14227) 888 : audit [DBG] from='client.16500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:48.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:47 vm01 bash[28222]: audit 2026-04-16T19:36:47.892097+0000 mon.vm01 (mon.0) 1275 : audit [DBG] from='client.? 192.168.123.101:0/2422909550' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:48.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:47 vm01 bash[28222]: audit 2026-04-16T19:36:47.892097+0000 mon.vm01 (mon.0) 1275 : audit [DBG] from='client.? 
192.168.123.101:0/2422909550' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:49.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:49 vm04 bash[34817]: cluster 2026-04-16T19:36:48.915747+0000 mgr.vm01.nwhpas (mgr.14227) 889 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:49.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:49 vm04 bash[34817]: cluster 2026-04-16T19:36:48.915747+0000 mgr.vm01.nwhpas (mgr.14227) 889 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:49.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:49 vm01 bash[28222]: cluster 2026-04-16T19:36:48.915747+0000 mgr.vm01.nwhpas (mgr.14227) 889 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:49.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:49 vm01 bash[28222]: cluster 2026-04-16T19:36:48.915747+0000 mgr.vm01.nwhpas (mgr.14227) 889 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:51.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:51 vm04 bash[34817]: cluster 2026-04-16T19:36:50.916227+0000 mgr.vm01.nwhpas (mgr.14227) 890 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:51.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:51 vm04 bash[34817]: cluster 2026-04-16T19:36:50.916227+0000 mgr.vm01.nwhpas (mgr.14227) 890 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:51.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:51 vm01 bash[28222]: cluster 2026-04-16T19:36:50.916227+0000 mgr.vm01.nwhpas (mgr.14227) 890 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:51.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:51 vm01 bash[28222]: cluster 2026-04-16T19:36:50.916227+0000 mgr.vm01.nwhpas (mgr.14227) 890 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:36:52.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:52 vm04 bash[34817]: audit 2026-04-16T19:36:52.585752+0000 mon.vm01 (mon.0) 1276 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:36:52.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:52 vm04 bash[34817]: audit 2026-04-16T19:36:52.585752+0000 mon.vm01 (mon.0) 1276 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:36:52.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:52 vm01 bash[28222]: audit 2026-04-16T19:36:52.585752+0000 mon.vm01 (mon.0) 1276 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:36:52.963 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:52 vm01 bash[28222]: audit 2026-04-16T19:36:52.585752+0000 mon.vm01 (mon.0) 1276 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:36:53.152 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:36:53.375 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:36:53.375 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (12m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:36:53.375 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:36:53.375 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:36:53.375 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 13m - - 2026-04-16T19:36:53.676 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:36:53.676 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:36:53.676 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:36:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:53 vm04 bash[34817]: cluster 2026-04-16T19:36:52.916734+0000 mgr.vm01.nwhpas (mgr.14227) 891 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:36:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:53 vm04 bash[34817]: cluster 2026-04-16T19:36:52.916734+0000 mgr.vm01.nwhpas (mgr.14227) 891 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:36:53.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:53 vm01 bash[28222]: cluster 2026-04-16T19:36:52.916734+0000 mgr.vm01.nwhpas (mgr.14227) 891 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:36:53.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:53 vm01 bash[28222]: cluster 2026-04-16T19:36:52.916734+0000 mgr.vm01.nwhpas (mgr.14227) 891 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:36:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:54 vm04 bash[34817]: audit 2026-04-16T19:36:53.129118+0000 mgr.vm01.nwhpas (mgr.14227) 892 : audit [DBG] from='client.16508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:54 vm04 bash[34817]: audit 2026-04-16T19:36:53.129118+0000 mgr.vm01.nwhpas (mgr.14227) 892 : audit [DBG] from='client.16508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:54 vm04 bash[34817]: audit 2026-04-16T19:36:53.365814+0000 mgr.vm01.nwhpas (mgr.14227) 893 : audit 
[DBG] from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:54 vm04 bash[34817]: audit 2026-04-16T19:36:53.365814+0000 mgr.vm01.nwhpas (mgr.14227) 893 : audit [DBG] from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:54 vm04 bash[34817]: audit 2026-04-16T19:36:53.670592+0000 mon.vm01 (mon.0) 1277 : audit [DBG] from='client.? 192.168.123.101:0/3132881217' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:54 vm04 bash[34817]: audit 2026-04-16T19:36:53.670592+0000 mon.vm01 (mon.0) 1277 : audit [DBG] from='client.? 192.168.123.101:0/3132881217' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:54 vm01 bash[28222]: audit 2026-04-16T19:36:53.129118+0000 mgr.vm01.nwhpas (mgr.14227) 892 : audit [DBG] from='client.16508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:54 vm01 bash[28222]: audit 2026-04-16T19:36:53.129118+0000 mgr.vm01.nwhpas (mgr.14227) 892 : audit [DBG] from='client.16508 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:54 vm01 bash[28222]: audit 2026-04-16T19:36:53.365814+0000 mgr.vm01.nwhpas (mgr.14227) 893 : audit [DBG] from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:54 vm01 bash[28222]: audit 2026-04-16T19:36:53.365814+0000 mgr.vm01.nwhpas (mgr.14227) 893 : audit [DBG] from='client.16512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:54 vm01 bash[28222]: audit 2026-04-16T19:36:53.670592+0000 mon.vm01 (mon.0) 1277 : audit [DBG] from='client.? 192.168.123.101:0/3132881217' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:54 vm01 bash[28222]: audit 2026-04-16T19:36:53.670592+0000 mon.vm01 (mon.0) 1277 : audit [DBG] from='client.? 
192.168.123.101:0/3132881217' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:36:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:55 vm04 bash[34817]: cluster 2026-04-16T19:36:54.917296+0000 mgr.vm01.nwhpas (mgr.14227) 894 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:55.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:55 vm04 bash[34817]: cluster 2026-04-16T19:36:54.917296+0000 mgr.vm01.nwhpas (mgr.14227) 894 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:55 vm01 bash[28222]: cluster 2026-04-16T19:36:54.917296+0000 mgr.vm01.nwhpas (mgr.14227) 894 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:55.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:55 vm01 bash[28222]: cluster 2026-04-16T19:36:54.917296+0000 mgr.vm01.nwhpas (mgr.14227) 894 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:57.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:56 vm04 bash[34817]: cluster 2026-04-16T19:36:56.917815+0000 mgr.vm01.nwhpas (mgr.14227) 895 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:57.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:56 vm04 bash[34817]: cluster 2026-04-16T19:36:56.917815+0000 mgr.vm01.nwhpas (mgr.14227) 895 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:57.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:56 vm01 bash[28222]: cluster 2026-04-16T19:36:56.917815+0000 mgr.vm01.nwhpas (mgr.14227) 895 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:57.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:56 vm01 bash[28222]: cluster 2026-04-16T19:36:56.917815+0000 mgr.vm01.nwhpas (mgr.14227) 895 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:36:58.914 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:36:59.140 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:36:59.140 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 7m ago 13m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:36:59.140 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:36:59.140 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 13m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:36:59.140 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 13m - - 2026-04-16T19:36:59.418 
INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:36:59.418 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:36:59.418 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:36:59.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:58 vm04 bash[34817]: audit 2026-04-16T19:36:58.885583+0000 mgr.vm01.nwhpas (mgr.14227) 896 : audit [DBG] from='client.16520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:59.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:58 vm04 bash[34817]: audit 2026-04-16T19:36:58.885583+0000 mgr.vm01.nwhpas (mgr.14227) 896 : audit [DBG] from='client.16520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:59.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:58 vm04 bash[34817]: cluster 2026-04-16T19:36:58.918433+0000 mgr.vm01.nwhpas (mgr.14227) 897 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:36:59.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:36:58 vm04 bash[34817]: cluster 2026-04-16T19:36:58.918433+0000 mgr.vm01.nwhpas (mgr.14227) 897 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:36:59.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:58 vm01 bash[28222]: audit 2026-04-16T19:36:58.885583+0000 mgr.vm01.nwhpas (mgr.14227) 896 : audit [DBG] from='client.16520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:59.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:58 vm01 bash[28222]: audit 2026-04-16T19:36:58.885583+0000 mgr.vm01.nwhpas (mgr.14227) 896 : audit [DBG] from='client.16520 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:36:59.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:58 vm01 bash[28222]: cluster 2026-04-16T19:36:58.918433+0000 mgr.vm01.nwhpas (mgr.14227) 897 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:36:59.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:36:58 vm01 bash[28222]: cluster 2026-04-16T19:36:58.918433+0000 mgr.vm01.nwhpas (mgr.14227) 897 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:37:00.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:00 vm04 bash[34817]: audit 2026-04-16T19:36:59.130281+0000 mgr.vm01.nwhpas (mgr.14227) 898 : audit [DBG] from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:00.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:00 vm04 bash[34817]: audit 2026-04-16T19:36:59.130281+0000 mgr.vm01.nwhpas (mgr.14227) 898 : audit [DBG] from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:00.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:00 vm04 bash[34817]: audit 2026-04-16T19:36:59.411631+0000 mon.vm01 (mon.0) 1278 : audit [DBG] 
from='client.? 192.168.123.101:0/778922675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:00.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:00 vm04 bash[34817]: audit 2026-04-16T19:36:59.411631+0000 mon.vm01 (mon.0) 1278 : audit [DBG] from='client.? 192.168.123.101:0/778922675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:00.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:00 vm01 bash[28222]: audit 2026-04-16T19:36:59.130281+0000 mgr.vm01.nwhpas (mgr.14227) 898 : audit [DBG] from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:00.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:00 vm01 bash[28222]: audit 2026-04-16T19:36:59.130281+0000 mgr.vm01.nwhpas (mgr.14227) 898 : audit [DBG] from='client.16524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:00.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:00 vm01 bash[28222]: audit 2026-04-16T19:36:59.411631+0000 mon.vm01 (mon.0) 1278 : audit [DBG] from='client.? 192.168.123.101:0/778922675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:00.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:00 vm01 bash[28222]: audit 2026-04-16T19:36:59.411631+0000 mon.vm01 (mon.0) 1278 : audit [DBG] from='client.? 192.168.123.101:0/778922675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:01.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:01 vm04 bash[34817]: cluster 2026-04-16T19:37:00.919106+0000 mgr.vm01.nwhpas (mgr.14227) 899 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:01.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:01 vm04 bash[34817]: cluster 2026-04-16T19:37:00.919106+0000 mgr.vm01.nwhpas (mgr.14227) 899 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:01.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:01 vm01 bash[28222]: cluster 2026-04-16T19:37:00.919106+0000 mgr.vm01.nwhpas (mgr.14227) 899 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:01.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:01 vm01 bash[28222]: cluster 2026-04-16T19:37:00.919106+0000 mgr.vm01.nwhpas (mgr.14227) 899 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:03.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:02 vm04 bash[34817]: cluster 2026-04-16T19:37:02.919498+0000 mgr.vm01.nwhpas (mgr.14227) 900 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:03.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:02 vm04 bash[34817]: cluster 2026-04-16T19:37:02.919498+0000 mgr.vm01.nwhpas (mgr.14227) 900 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:03.463 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:02 vm01 bash[28222]: cluster 2026-04-16T19:37:02.919498+0000 mgr.vm01.nwhpas (mgr.14227) 900 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:03.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:02 vm01 bash[28222]: cluster 2026-04-16T19:37:02.919498+0000 mgr.vm01.nwhpas (mgr.14227) 900 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:37:04.638 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:37:04.821 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:37:04.821 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 7m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:37:04.821 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 13m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:37:04.821 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:37:04.821 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - - 2026-04-16T19:37:05.055 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:37:05.055 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:37:05.055 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:37:05.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:04 vm04 bash[34817]: audit 2026-04-16T19:37:04.614667+0000 mgr.vm01.nwhpas (mgr.14227) 901 : audit [DBG] from='client.16532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:04 vm04 bash[34817]: audit 2026-04-16T19:37:04.614667+0000 mgr.vm01.nwhpas (mgr.14227) 901 : audit [DBG] from='client.16532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:04 vm04 bash[34817]: audit 2026-04-16T19:37:04.812677+0000 mgr.vm01.nwhpas (mgr.14227) 902 : audit [DBG] from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:04 vm04 bash[34817]: audit 2026-04-16T19:37:04.812677+0000 mgr.vm01.nwhpas (mgr.14227) 902 : audit [DBG] from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:04 vm04 bash[34817]: cluster 2026-04-16T19:37:04.919854+0000 mgr.vm01.nwhpas (mgr.14227) 903 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:05.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:04 vm04 bash[34817]: cluster 2026-04-16T19:37:04.919854+0000 mgr.vm01.nwhpas (mgr.14227) 903 
: cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:05.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:04 vm01 bash[28222]: audit 2026-04-16T19:37:04.614667+0000 mgr.vm01.nwhpas (mgr.14227) 901 : audit [DBG] from='client.16532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:04 vm01 bash[28222]: audit 2026-04-16T19:37:04.614667+0000 mgr.vm01.nwhpas (mgr.14227) 901 : audit [DBG] from='client.16532 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:04 vm01 bash[28222]: audit 2026-04-16T19:37:04.812677+0000 mgr.vm01.nwhpas (mgr.14227) 902 : audit [DBG] from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:04 vm01 bash[28222]: audit 2026-04-16T19:37:04.812677+0000 mgr.vm01.nwhpas (mgr.14227) 902 : audit [DBG] from='client.16536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:05.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:04 vm01 bash[28222]: cluster 2026-04-16T19:37:04.919854+0000 mgr.vm01.nwhpas (mgr.14227) 903 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:05.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:04 vm01 bash[28222]: cluster 2026-04-16T19:37:04.919854+0000 mgr.vm01.nwhpas (mgr.14227) 903 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:06.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:06 vm04 bash[34817]: audit 2026-04-16T19:37:05.049965+0000 mon.vm01 (mon.0) 1279 : audit [DBG] from='client.? 192.168.123.101:0/1066115695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:06.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:06 vm04 bash[34817]: audit 2026-04-16T19:37:05.049965+0000 mon.vm01 (mon.0) 1279 : audit [DBG] from='client.? 192.168.123.101:0/1066115695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:06.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:05 vm01 bash[28222]: audit 2026-04-16T19:37:05.049965+0000 mon.vm01 (mon.0) 1279 : audit [DBG] from='client.? 192.168.123.101:0/1066115695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:06.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:05 vm01 bash[28222]: audit 2026-04-16T19:37:05.049965+0000 mon.vm01 (mon.0) 1279 : audit [DBG] from='client.? 
192.168.123.101:0/1066115695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:07.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:07 vm04 bash[34817]: cluster 2026-04-16T19:37:06.920233+0000 mgr.vm01.nwhpas (mgr.14227) 904 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:07.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:07 vm04 bash[34817]: cluster 2026-04-16T19:37:06.920233+0000 mgr.vm01.nwhpas (mgr.14227) 904 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:07.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:07 vm01 bash[28222]: cluster 2026-04-16T19:37:06.920233+0000 mgr.vm01.nwhpas (mgr.14227) 904 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:07.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:07 vm01 bash[28222]: cluster 2026-04-16T19:37:06.920233+0000 mgr.vm01.nwhpas (mgr.14227) 904 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:08.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:08 vm04 bash[34817]: audit 2026-04-16T19:37:07.585866+0000 mon.vm01 (mon.0) 1280 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:37:08.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:08 vm04 bash[34817]: audit 2026-04-16T19:37:07.585866+0000 mon.vm01 (mon.0) 1280 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:37:08.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:08 vm01 bash[28222]: audit 2026-04-16T19:37:07.585866+0000 mon.vm01 (mon.0) 1280 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:37:08.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:08 vm01 bash[28222]: audit 2026-04-16T19:37:07.585866+0000 mon.vm01 (mon.0) 1280 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:37:09.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:09 vm04 bash[34817]: cluster 2026-04-16T19:37:08.920707+0000 mgr.vm01.nwhpas (mgr.14227) 905 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:09.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:09 vm04 bash[34817]: cluster 2026-04-16T19:37:08.920707+0000 mgr.vm01.nwhpas (mgr.14227) 905 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:09.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:09 vm01 bash[28222]: cluster 2026-04-16T19:37:08.920707+0000 mgr.vm01.nwhpas (mgr.14227) 905 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:09.463 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:09 vm01 bash[28222]: cluster 2026-04-16T19:37:08.920707+0000 mgr.vm01.nwhpas (mgr.14227) 905 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:37:10.260 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:37:10.463 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:37:10.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 7m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:37:10.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (7m) 7m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:37:10.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:37:10.463 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - - 2026-04-16T19:37:10.719 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:37:10.719 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:37:10.719 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:37:11.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:10 vm04 bash[34817]: audit 2026-04-16T19:37:10.713440+0000 mon.vm01 (mon.0) 1281 : audit [DBG] from='client.? 192.168.123.101:0/392403392' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:11.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:10 vm04 bash[34817]: audit 2026-04-16T19:37:10.713440+0000 mon.vm01 (mon.0) 1281 : audit [DBG] from='client.? 192.168.123.101:0/392403392' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:11.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:10 vm01 bash[28222]: audit 2026-04-16T19:37:10.713440+0000 mon.vm01 (mon.0) 1281 : audit [DBG] from='client.? 192.168.123.101:0/392403392' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:11.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:10 vm01 bash[28222]: audit 2026-04-16T19:37:10.713440+0000 mon.vm01 (mon.0) 1281 : audit [DBG] from='client.? 
192.168.123.101:0/392403392' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:11 vm04 bash[34817]: audit 2026-04-16T19:37:10.235825+0000 mgr.vm01.nwhpas (mgr.14227) 906 : audit [DBG] from='client.16544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:11 vm04 bash[34817]: audit 2026-04-16T19:37:10.235825+0000 mgr.vm01.nwhpas (mgr.14227) 906 : audit [DBG] from='client.16544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:11 vm04 bash[34817]: audit 2026-04-16T19:37:10.455049+0000 mgr.vm01.nwhpas (mgr.14227) 907 : audit [DBG] from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:11 vm04 bash[34817]: audit 2026-04-16T19:37:10.455049+0000 mgr.vm01.nwhpas (mgr.14227) 907 : audit [DBG] from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:11 vm04 bash[34817]: cluster 2026-04-16T19:37:10.921244+0000 mgr.vm01.nwhpas (mgr.14227) 908 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:11 vm04 bash[34817]: cluster 2026-04-16T19:37:10.921244+0000 mgr.vm01.nwhpas (mgr.14227) 908 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:11 vm01 bash[28222]: audit 2026-04-16T19:37:10.235825+0000 mgr.vm01.nwhpas (mgr.14227) 906 : audit [DBG] from='client.16544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:11 vm01 bash[28222]: audit 2026-04-16T19:37:10.235825+0000 mgr.vm01.nwhpas (mgr.14227) 906 : audit [DBG] from='client.16544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:11 vm01 bash[28222]: audit 2026-04-16T19:37:10.455049+0000 mgr.vm01.nwhpas (mgr.14227) 907 : audit [DBG] from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:11 vm01 bash[28222]: audit 2026-04-16T19:37:10.455049+0000 mgr.vm01.nwhpas (mgr.14227) 907 : audit [DBG] from='client.16548 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:11 vm01 bash[28222]: cluster 2026-04-16T19:37:10.921244+0000 mgr.vm01.nwhpas (mgr.14227) 908 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:12.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:11 vm01 bash[28222]: cluster 2026-04-16T19:37:10.921244+0000 mgr.vm01.nwhpas (mgr.14227) 908 : 
cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:13.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:12 vm04 bash[34817]: cluster 2026-04-16T19:37:12.921625+0000 mgr.vm01.nwhpas (mgr.14227) 909 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:13.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:12 vm04 bash[34817]: cluster 2026-04-16T19:37:12.921625+0000 mgr.vm01.nwhpas (mgr.14227) 909 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:13.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:12 vm01 bash[28222]: cluster 2026-04-16T19:37:12.921625+0000 mgr.vm01.nwhpas (mgr.14227) 909 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:13.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:12 vm01 bash[28222]: cluster 2026-04-16T19:37:12.921625+0000 mgr.vm01.nwhpas (mgr.14227) 909 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:15.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:15 vm04 bash[34817]: cluster 2026-04-16T19:37:14.922099+0000 mgr.vm01.nwhpas (mgr.14227) 910 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:15.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:15 vm04 bash[34817]: cluster 2026-04-16T19:37:14.922099+0000 mgr.vm01.nwhpas (mgr.14227) 910 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:15.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:15 vm01 bash[28222]: cluster 2026-04-16T19:37:14.922099+0000 mgr.vm01.nwhpas (mgr.14227) 910 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:15.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:15 vm01 bash[28222]: cluster 2026-04-16T19:37:14.922099+0000 mgr.vm01.nwhpas (mgr.14227) 910 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:15.938 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:37:16.125 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:37:16.125 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 7m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:37:16.125 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 7m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:37:16.125 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (2m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:37:16.125 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - - 2026-04-16T19:37:16.363 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:37:16.363 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:37:16.363 INFO:teuthology.orchestra.run.vm01.stdout: daemon 
rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:37:16.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:16 vm04 bash[34817]: audit 2026-04-16T19:37:15.914616+0000 mgr.vm01.nwhpas (mgr.14227) 911 : audit [DBG] from='client.16556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:16.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:16 vm04 bash[34817]: audit 2026-04-16T19:37:15.914616+0000 mgr.vm01.nwhpas (mgr.14227) 911 : audit [DBG] from='client.16556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:16.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:16 vm01 bash[28222]: audit 2026-04-16T19:37:15.914616+0000 mgr.vm01.nwhpas (mgr.14227) 911 : audit [DBG] from='client.16556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:16.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:16 vm01 bash[28222]: audit 2026-04-16T19:37:15.914616+0000 mgr.vm01.nwhpas (mgr.14227) 911 : audit [DBG] from='client.16556 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:17.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:17 vm04 bash[34817]: audit 2026-04-16T19:37:16.117087+0000 mgr.vm01.nwhpas (mgr.14227) 912 : audit [DBG] from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:17.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:17 vm04 bash[34817]: audit 2026-04-16T19:37:16.117087+0000 mgr.vm01.nwhpas (mgr.14227) 912 : audit [DBG] from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:17.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:17 vm04 bash[34817]: audit 2026-04-16T19:37:16.357476+0000 mon.vm01 (mon.0) 1282 : audit [DBG] from='client.? 192.168.123.101:0/663547020' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:17.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:17 vm04 bash[34817]: audit 2026-04-16T19:37:16.357476+0000 mon.vm01 (mon.0) 1282 : audit [DBG] from='client.? 
192.168.123.101:0/663547020' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:17.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:17 vm04 bash[34817]: cluster 2026-04-16T19:37:16.922508+0000 mgr.vm01.nwhpas (mgr.14227) 913 : cluster [DBG] pgmap v508: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:17.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:17 vm01 bash[28222]: audit 2026-04-16T19:37:16.117087+0000 mgr.vm01.nwhpas (mgr.14227) 912 : audit [DBG] from='client.16560 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:17.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:17 vm01 bash[28222]: audit 2026-04-16T19:37:16.357476+0000 mon.vm01 (mon.0) 1282 : audit [DBG] from='client.? 192.168.123.101:0/663547020' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:17.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:17 vm01 bash[28222]: cluster 2026-04-16T19:37:16.922508+0000 mgr.vm01.nwhpas (mgr.14227) 913 : cluster [DBG] pgmap v508: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:19.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:19 vm04 bash[34817]: cluster 2026-04-16T19:37:18.923071+0000 mgr.vm01.nwhpas (mgr.14227) 914 : cluster [DBG] pgmap v509: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:19.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:19 vm01 bash[28222]: cluster 2026-04-16T19:37:18.923071+0000 mgr.vm01.nwhpas (mgr.14227) 914 : cluster [DBG] pgmap v509: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:21.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:20 vm04 bash[34817]: cluster 2026-04-16T19:37:20.923519+0000 mgr.vm01.nwhpas (mgr.14227) 915 : cluster [DBG] pgmap v510: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:21.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:20 vm01 bash[28222]: cluster 2026-04-16T19:37:20.923519+0000 mgr.vm01.nwhpas (mgr.14227) 915 : cluster [DBG] pgmap v510: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:21.576 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:37:21.762 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:37:21.762 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:37:21.762 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:37:21.762 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:37:21.762 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - -
2026-04-16T19:37:21.990 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:37:21.990 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:37:21.990 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:37:22.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:22 vm04 bash[34817]: audit 2026-04-16T19:37:21.553664+0000 mgr.vm01.nwhpas (mgr.14227) 916 : audit [DBG] from='client.16568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:22.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:22 vm04 bash[34817]: audit 2026-04-16T19:37:21.753811+0000 mgr.vm01.nwhpas (mgr.14227) 917 : audit [DBG] from='client.16572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:22.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:22 vm04 bash[34817]: audit 2026-04-16T19:37:21.984417+0000 mon.vm01 (mon.0) 1283 : audit [DBG] from='client.? 192.168.123.101:0/1356748795' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
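The block above, from "Waiting for rgw.foo.vm04.uxumrv to stop" through the health detail, is one pass of a polling loop that repeats roughly every five seconds for the rest of this section. Note that `ceph orch ps` reports the daemon's STATUS as `error` rather than `stopped`, so a poll that only matches `stopped` keeps waiting even though the daemon is already down. A minimal sketch of such a poll that accepts either terminal state (the daemon name and table columns are taken from this log; the script itself is illustrative, not the suite's own check):

    #!/usr/bin/env bash
    # Illustrative sketch, not the suite's own check: poll `ceph orch ps`
    # until the named daemon leaves the running state, treating "stopped"
    # and "error" both as terminal (the table above shows "error").
    daemon=rgw.foo.vm04.uxumrv        # name taken from this log
    for attempt in $(seq 1 60); do    # ~300 s at 5 s per attempt
        status=$(ceph orch ps --daemon-type rgw | awk -v d="$daemon" '$1 == d {print $4}')
        case "$status" in
            stopped|error)
                echo "$daemon is $status"
                exit 0
                ;;
            *)
                echo "Waiting for $daemon to stop (status: ${status:-unknown})"
                ceph health detail
                sleep 5
                ;;
        esac
    done
    echo "timed out waiting for $daemon to stop" >&2
    exit 1
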
2026-04-16T19:37:22.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:22 vm01 bash[28222]: audit 2026-04-16T19:37:21.553664+0000 mgr.vm01.nwhpas (mgr.14227) 916 : audit [DBG] from='client.16568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:22.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:22 vm01 bash[28222]: audit 2026-04-16T19:37:21.753811+0000 mgr.vm01.nwhpas (mgr.14227) 917 : audit [DBG] from='client.16572 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:22.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:22 vm01 bash[28222]: audit 2026-04-16T19:37:21.984417+0000 mon.vm01 (mon.0) 1283 : audit [DBG] from='client.? 192.168.123.101:0/1356748795' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:23.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:23 vm04 bash[34817]: audit 2026-04-16T19:37:22.585925+0000 mon.vm01 (mon.0) 1284 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:37:23.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:23 vm04 bash[34817]: cluster 2026-04-16T19:37:22.923929+0000 mgr.vm01.nwhpas (mgr.14227) 918 : cluster [DBG] pgmap v511: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:23.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:23 vm01 bash[28222]: audit 2026-04-16T19:37:22.585925+0000 mon.vm01 (mon.0) 1284 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:37:23.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:23 vm01 bash[28222]: cluster 2026-04-16T19:37:22.923929+0000 mgr.vm01.nwhpas (mgr.14227) 918 : cluster [DBG] pgmap v511: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:25.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:25 vm04 bash[34817]: cluster 2026-04-16T19:37:24.924349+0000 mgr.vm01.nwhpas (mgr.14227) 919 : cluster [DBG] pgmap v512: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:25.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:24 vm01 bash[28222]: cluster 2026-04-16T19:37:24.924349+0000 mgr.vm01.nwhpas (mgr.14227) 919 : cluster [DBG] pgmap v512: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:27.205 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:37:27.387 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:37:27.387 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:37:27.387 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:37:27.387 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:37:27.387 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - -
2026-04-16T19:37:27.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:27 vm04 bash[34817]: cluster 2026-04-16T19:37:26.924724+0000 mgr.vm01.nwhpas (mgr.14227) 920 : cluster [DBG] pgmap v513: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:27.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:27 vm01 bash[28222]: cluster 2026-04-16T19:37:26.924724+0000 mgr.vm01.nwhpas (mgr.14227) 920 : cluster [DBG] pgmap v513: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:27.612 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:37:27.612 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:37:27.612 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:37:28.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:28 vm04 bash[34817]: audit 2026-04-16T19:37:27.181506+0000 mgr.vm01.nwhpas (mgr.14227) 921 : audit [DBG] from='client.16580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:28.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:28 vm04 bash[34817]: audit 2026-04-16T19:37:27.378826+0000 mgr.vm01.nwhpas (mgr.14227) 922 : audit [DBG] from='client.16584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:28.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:28 vm04 bash[34817]: audit 2026-04-16T19:37:27.606723+0000 mon.vm01 (mon.0) 1285 : audit [DBG] from='client.? 192.168.123.101:0/2666287261' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:28.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:28 vm01 bash[28222]: audit 2026-04-16T19:37:27.181506+0000 mgr.vm01.nwhpas (mgr.14227) 921 : audit [DBG] from='client.16580 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:28.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:28 vm01 bash[28222]: audit 2026-04-16T19:37:27.378826+0000 mgr.vm01.nwhpas (mgr.14227) 922 : audit [DBG] from='client.16584 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:28.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:28 vm01 bash[28222]: audit 2026-04-16T19:37:27.606723+0000 mon.vm01 (mon.0) 1285 : audit [DBG] from='client.? 192.168.123.101:0/2666287261' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:29.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:29 vm04 bash[34817]: cluster 2026-04-16T19:37:28.925176+0000 mgr.vm01.nwhpas (mgr.14227) 923 : cluster [DBG] pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:37:29.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:29 vm01 bash[28222]: cluster 2026-04-16T19:37:28.925176+0000 mgr.vm01.nwhpas (mgr.14227) 923 : cluster [DBG] pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:37:31.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:31 vm04 bash[34817]: cluster 2026-04-16T19:37:30.925654+0000 mgr.vm01.nwhpas (mgr.14227) 924 : cluster [DBG] pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:31.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:31 vm01 bash[28222]: cluster 2026-04-16T19:37:30.925654+0000 mgr.vm01.nwhpas (mgr.14227) 924 : cluster [DBG] pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:32.822 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:37:33.027 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:37:33.027 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:37:33.027 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:37:33.027 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:37:33.027 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - -
2026-04-16T19:37:33.305 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:37:33.306 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:37:33.306 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:37:33.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:33 vm04 bash[34817]: audit 2026-04-16T19:37:32.796733+0000 mgr.vm01.nwhpas (mgr.14227) 925 : audit [DBG] from='client.16592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:33.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:33 vm04 bash[34817]: cluster 2026-04-16T19:37:32.926071+0000 mgr.vm01.nwhpas (mgr.14227) 926 : cluster [DBG] pgmap v516: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:33.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:33 vm01 bash[28222]: audit 2026-04-16T19:37:32.796733+0000 mgr.vm01.nwhpas (mgr.14227) 925 : audit [DBG] from='client.16592 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:33.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:33 vm01 bash[28222]: cluster 2026-04-16T19:37:32.926071+0000 mgr.vm01.nwhpas (mgr.14227) 926 : cluster [DBG] pgmap v516: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:34.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:34 vm04 bash[34817]: audit 2026-04-16T19:37:33.018800+0000 mgr.vm01.nwhpas (mgr.14227) 927 : audit [DBG] from='client.16596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:34.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:34 vm04 bash[34817]: audit 2026-04-16T19:37:33.299738+0000 mon.vm01 (mon.0) 1286 : audit [DBG] from='client.? 192.168.123.101:0/2323994044' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:34.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:34 vm01 bash[28222]: audit 2026-04-16T19:37:33.018800+0000 mgr.vm01.nwhpas (mgr.14227) 927 : audit [DBG] from='client.16596 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:34.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:34 vm01 bash[28222]: audit 2026-04-16T19:37:33.299738+0000 mon.vm01 (mon.0) 1286 : audit [DBG] from='client.? 192.168.123.101:0/2323994044' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:35.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:35 vm04 bash[34817]: cluster 2026-04-16T19:37:34.926511+0000 mgr.vm01.nwhpas (mgr.14227) 928 : cluster [DBG] pgmap v517: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:35.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:35 vm01 bash[28222]: cluster 2026-04-16T19:37:34.926511+0000 mgr.vm01.nwhpas (mgr.14227) 928 : cluster [DBG] pgmap v517: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:37.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:37 vm04 bash[34817]: cluster 2026-04-16T19:37:36.926940+0000 mgr.vm01.nwhpas (mgr.14227) 929 : cluster [DBG] pgmap v518: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:37.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:37 vm01 bash[28222]: cluster 2026-04-16T19:37:36.926940+0000 mgr.vm01.nwhpas (mgr.14227) 929 : cluster [DBG] pgmap v518: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:38.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:38 vm04 bash[34817]: audit 2026-04-16T19:37:37.586358+0000 mon.vm01 (mon.0) 1287 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:37:38.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:38 vm01 bash[28222]: audit 2026-04-16T19:37:37.586358+0000 mon.vm01 (mon.0) 1287 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:37:38.536 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:37:38.730 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:37:38.730 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:37:38.730 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:37:38.731 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 2m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:37:38.731 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 2m ago 14m - -
2026-04-16T19:37:38.968 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:37:38.968 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:37:38.968 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:37:39.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:39 vm04 bash[34817]: audit 2026-04-16T19:37:38.513176+0000 mgr.vm01.nwhpas (mgr.14227) 930 : audit [DBG] from='client.25703 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:39.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:39 vm04 bash[34817]: audit 2026-04-16T19:37:38.722139+0000 mgr.vm01.nwhpas (mgr.14227) 931 : audit [DBG] from='client.16606 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:39.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:39 vm04 bash[34817]: cluster 2026-04-16T19:37:38.927422+0000 mgr.vm01.nwhpas (mgr.14227) 932 : cluster [DBG] pgmap v519: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:39.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:39 vm04 bash[34817]: audit 2026-04-16T19:37:38.962014+0000 mon.vm01 (mon.0) 1288 : audit [DBG] from='client.? 192.168.123.101:0/2185545590' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:39.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:39 vm01 bash[28222]: audit 2026-04-16T19:37:38.513176+0000 mgr.vm01.nwhpas (mgr.14227) 930 : audit [DBG] from='client.25703 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:39.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:39 vm01 bash[28222]: audit 2026-04-16T19:37:38.722139+0000 mgr.vm01.nwhpas (mgr.14227) 931 : audit [DBG] from='client.16606 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:39.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:39 vm01 bash[28222]: cluster 2026-04-16T19:37:38.927422+0000 mgr.vm01.nwhpas (mgr.14227) 932 : cluster [DBG] pgmap v519: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:37:39.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:39 vm01 bash[28222]: audit 2026-04-16T19:37:38.962014+0000 mon.vm01 (mon.0) 1288 : audit [DBG] from='client.? 192.168.123.101:0/2185545590' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:41.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:41 vm04 bash[34817]: cluster 2026-04-16T19:37:40.927859+0000 mgr.vm01.nwhpas (mgr.14227) 933 : cluster [DBG] pgmap v520: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:41.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:41 vm01 bash[28222]: cluster 2026-04-16T19:37:40.927859+0000 mgr.vm01.nwhpas (mgr.14227) 933 : cluster [DBG] pgmap v520: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:43.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:42 vm04 bash[34817]: cluster 2026-04-16T19:37:42.928314+0000 mgr.vm01.nwhpas (mgr.14227) 934 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:43.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:42 vm04 bash[34817]: audit 2026-04-16T19:37:42.933998+0000 mon.vm01 (mon.0) 1289 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:37:43.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:42 vm01 bash[28222]: cluster 2026-04-16T19:37:42.928314+0000 mgr.vm01.nwhpas (mgr.14227) 934 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:43.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:42 vm01 bash[28222]: audit 2026-04-16T19:37:42.933998+0000 mon.vm01 (mon.0) 1289 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:37:44.186 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:37:44.383 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:37:44.383 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:37:44.383 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:37:44.383 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:37:44.383 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 14m - -
2026-04-16T19:37:44.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:43 vm04 bash[34817]: audit 2026-04-16T19:37:43.319182+0000 mon.vm01 (mon.0) 1290 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:37:44.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:43 vm04 bash[34817]: audit 2026-04-16T19:37:43.319909+0000 mon.vm01 (mon.0) 1291 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:37:44.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:43 vm04 bash[34817]: cluster 2026-04-16T19:37:43.320916+0000 mgr.vm01.nwhpas (mgr.14227) 935 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:44.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:43 vm04 bash[34817]: cluster 2026-04-16T19:37:43.321063+0000 mgr.vm01.nwhpas (mgr.14227) 936 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:44.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:43 vm04 bash[34817]: audit 2026-04-16T19:37:43.325295+0000 mon.vm01 (mon.0) 1292 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:37:44.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:43 vm04 bash[34817]: audit 2026-04-16T19:37:43.326590+0000 mon.vm01 (mon.0) 1293 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:37:44.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:43 vm01 bash[28222]: audit 2026-04-16T19:37:43.319182+0000 mon.vm01 (mon.0) 1290 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:37:44.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:43 vm01 bash[28222]: audit 2026-04-16T19:37:43.319909+0000 mon.vm01 (mon.0) 1291 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:37:44.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:43 vm01 bash[28222]: cluster 2026-04-16T19:37:43.320916+0000 mgr.vm01.nwhpas (mgr.14227) 935 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:44.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:43 vm01 bash[28222]: cluster 2026-04-16T19:37:43.321063+0000 mgr.vm01.nwhpas (mgr.14227) 936 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:37:44.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:43 vm01 bash[28222]: audit 2026-04-16T19:37:43.325295+0000 mon.vm01 (mon.0) 1292 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:37:44.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:43 vm01 bash[28222]: audit 2026-04-16T19:37:43.326590+0000 mon.vm01 (mon.0) 1293 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:37:44.629 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:37:44.629 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:37:44.629 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:37:45.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:44 vm04 bash[34817]: audit 2026-04-16T19:37:44.163982+0000 mgr.vm01.nwhpas (mgr.14227) 937 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:45.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:44 vm04 bash[34817]: audit 2026-04-16T19:37:44.374818+0000 mgr.vm01.nwhpas (mgr.14227) 938 : audit [DBG] from='client.16618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:45.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:44 vm04 bash[34817]: audit 2026-04-16T19:37:44.623344+0000 mon.vm01 (mon.0) 1294 : audit [DBG] from='client.? 192.168.123.101:0/594806595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
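Each polling pass above leaves a matching audit trail: the two `orch ps` queries are dispatched to the mgr (the mon-mgr target) and the `health detail` query to the mon, while the mgr's own periodic housekeeping (`osd blocklist ls`, `config dump`, `auth get`, `osd tree`) is interleaved between passes. For scripting against this state, `ceph orch ps` can also emit JSON, which avoids scraping the table columns; a minimal sketch follows (jq is assumed to be installed, and the `daemon_type`/`daemon_id`/`status_desc` field names are taken from cephadm's DaemonDescription dump, so verify them against the release in use):

    # Illustrative sketch, not part of the suite: query one daemon's status
    # from the JSON form of `ceph orch ps` instead of grepping the table.
    # Assumes jq; field names should be verified against the running release.
    ceph orch ps --daemon-type rgw --format json |
        jq -r '.[] | select(.daemon_type == "rgw" and .daemon_id == "foo.vm04.uxumrv") | .status_desc'
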
192.168.123.101:0/594806595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:44 vm01 bash[28222]: audit 2026-04-16T19:37:44.163982+0000 mgr.vm01.nwhpas (mgr.14227) 937 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:44 vm01 bash[28222]: audit 2026-04-16T19:37:44.163982+0000 mgr.vm01.nwhpas (mgr.14227) 937 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:44 vm01 bash[28222]: audit 2026-04-16T19:37:44.374818+0000 mgr.vm01.nwhpas (mgr.14227) 938 : audit [DBG] from='client.16618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:44 vm01 bash[28222]: audit 2026-04-16T19:37:44.374818+0000 mgr.vm01.nwhpas (mgr.14227) 938 : audit [DBG] from='client.16618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:44 vm01 bash[28222]: audit 2026-04-16T19:37:44.623344+0000 mon.vm01 (mon.0) 1294 : audit [DBG] from='client.? 192.168.123.101:0/594806595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:45.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:44 vm01 bash[28222]: audit 2026-04-16T19:37:44.623344+0000 mon.vm01 (mon.0) 1294 : audit [DBG] from='client.? 
192.168.123.101:0/594806595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:46.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:46 vm04 bash[34817]: cluster 2026-04-16T19:37:45.321475+0000 mgr.vm01.nwhpas (mgr.14227) 939 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:46.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:46 vm04 bash[34817]: cluster 2026-04-16T19:37:45.321475+0000 mgr.vm01.nwhpas (mgr.14227) 939 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:46.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:46 vm01 bash[28222]: cluster 2026-04-16T19:37:45.321475+0000 mgr.vm01.nwhpas (mgr.14227) 939 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:46.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:46 vm01 bash[28222]: cluster 2026-04-16T19:37:45.321475+0000 mgr.vm01.nwhpas (mgr.14227) 939 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:48.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:48 vm04 bash[34817]: cluster 2026-04-16T19:37:47.321909+0000 mgr.vm01.nwhpas (mgr.14227) 940 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:48.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:48 vm04 bash[34817]: cluster 2026-04-16T19:37:47.321909+0000 mgr.vm01.nwhpas (mgr.14227) 940 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:48.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:48 vm01 bash[28222]: cluster 2026-04-16T19:37:47.321909+0000 mgr.vm01.nwhpas (mgr.14227) 940 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:48.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:48 vm01 bash[28222]: cluster 2026-04-16T19:37:47.321909+0000 mgr.vm01.nwhpas (mgr.14227) 940 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:37:49.837 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:37:50.017 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:37:50.017 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:37:50.017 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:37:50.017 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:37:50.017 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 14m - - 2026-04-16T19:37:50.257 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:37:50.257 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:37:50.257 INFO:teuthology.orchestra.run.vm01.stdout: daemon 
rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:37:50.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:50 vm04 bash[34817]: cluster 2026-04-16T19:37:49.322385+0000 mgr.vm01.nwhpas (mgr.14227) 941 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:50.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:50 vm04 bash[34817]: cluster 2026-04-16T19:37:49.322385+0000 mgr.vm01.nwhpas (mgr.14227) 941 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:50.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:50 vm04 bash[34817]: audit 2026-04-16T19:37:49.810867+0000 mgr.vm01.nwhpas (mgr.14227) 942 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:50.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:50 vm04 bash[34817]: audit 2026-04-16T19:37:49.810867+0000 mgr.vm01.nwhpas (mgr.14227) 942 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:50.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:50 vm04 bash[34817]: audit 2026-04-16T19:37:50.251103+0000 mon.vm01 (mon.0) 1295 : audit [DBG] from='client.? 192.168.123.101:0/4042987865' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:50.711 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:50 vm04 bash[34817]: audit 2026-04-16T19:37:50.251103+0000 mon.vm01 (mon.0) 1295 : audit [DBG] from='client.? 192.168.123.101:0/4042987865' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:50.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:50 vm01 bash[28222]: cluster 2026-04-16T19:37:49.322385+0000 mgr.vm01.nwhpas (mgr.14227) 941 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:50.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:50 vm01 bash[28222]: cluster 2026-04-16T19:37:49.322385+0000 mgr.vm01.nwhpas (mgr.14227) 941 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:50.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:50 vm01 bash[28222]: audit 2026-04-16T19:37:49.810867+0000 mgr.vm01.nwhpas (mgr.14227) 942 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:50.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:50 vm01 bash[28222]: audit 2026-04-16T19:37:49.810867+0000 mgr.vm01.nwhpas (mgr.14227) 942 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:50.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:50 vm01 bash[28222]: audit 2026-04-16T19:37:50.251103+0000 mon.vm01 (mon.0) 1295 : audit [DBG] from='client.? 192.168.123.101:0/4042987865' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:50.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:50 vm01 bash[28222]: audit 2026-04-16T19:37:50.251103+0000 mon.vm01 (mon.0) 1295 : audit [DBG] from='client.? 
192.168.123.101:0/4042987865' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:37:51.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:51 vm04 bash[34817]: audit 2026-04-16T19:37:50.008996+0000 mgr.vm01.nwhpas (mgr.14227) 943 : audit [DBG] from='client.16630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:51.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:51 vm04 bash[34817]: audit 2026-04-16T19:37:50.008996+0000 mgr.vm01.nwhpas (mgr.14227) 943 : audit [DBG] from='client.16630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:51.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:51 vm01 bash[28222]: audit 2026-04-16T19:37:50.008996+0000 mgr.vm01.nwhpas (mgr.14227) 943 : audit [DBG] from='client.16630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:51.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:51 vm01 bash[28222]: audit 2026-04-16T19:37:50.008996+0000 mgr.vm01.nwhpas (mgr.14227) 943 : audit [DBG] from='client.16630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:37:52.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:52 vm04 bash[34817]: cluster 2026-04-16T19:37:51.322828+0000 mgr.vm01.nwhpas (mgr.14227) 944 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:52.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:52 vm04 bash[34817]: cluster 2026-04-16T19:37:51.322828+0000 mgr.vm01.nwhpas (mgr.14227) 944 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:52.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:52 vm01 bash[28222]: cluster 2026-04-16T19:37:51.322828+0000 mgr.vm01.nwhpas (mgr.14227) 944 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:52.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:52 vm01 bash[28222]: cluster 2026-04-16T19:37:51.322828+0000 mgr.vm01.nwhpas (mgr.14227) 944 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s 2026-04-16T19:37:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:53 vm04 bash[34817]: audit 2026-04-16T19:37:52.586602+0000 mon.vm01 (mon.0) 1296 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:37:53.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:53 vm04 bash[34817]: audit 2026-04-16T19:37:52.586602+0000 mon.vm01 (mon.0) 1296 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:37:53.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:53 vm01 bash[28222]: audit 2026-04-16T19:37:52.586602+0000 mon.vm01 (mon.0) 1296 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 
2026-04-16T19:37:54.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:54 vm04 bash[34817]: cluster 2026-04-16T19:37:53.323298+0000 mgr.vm01.nwhpas (mgr.14227) 945 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-16T19:37:54.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:54 vm01 bash[28222]: cluster 2026-04-16T19:37:53.323298+0000 mgr.vm01.nwhpas (mgr.14227) 945 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-16T19:37:55.463 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:37:55.647 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:37:55.647 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (13m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:37:55.647 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:37:55.647 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:37:55.647 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 14m - -
2026-04-16T19:37:55.878 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:37:55.879 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:37:55.879 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:37:56.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:56 vm04 bash[34817]: cluster 2026-04-16T19:37:55.323690+0000 mgr.vm01.nwhpas (mgr.14227) 946 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:56.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:56 vm04 bash[34817]: audit 2026-04-16T19:37:55.438205+0000 mgr.vm01.nwhpas (mgr.14227) 947 : audit [DBG] from='client.16638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:56.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:56 vm04 bash[34817]: audit 2026-04-16T19:37:55.638684+0000 mgr.vm01.nwhpas (mgr.14227) 948 : audit [DBG] from='client.16642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:56.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:56 vm04 bash[34817]: audit 2026-04-16T19:37:55.872898+0000 mon.vm01 (mon.0) 1297 : audit [DBG] from='client.? 192.168.123.101:0/809502826' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:56.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:56 vm01 bash[28222]: cluster 2026-04-16T19:37:55.323690+0000 mgr.vm01.nwhpas (mgr.14227) 946 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:56.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:56 vm01 bash[28222]: audit 2026-04-16T19:37:55.438205+0000 mgr.vm01.nwhpas (mgr.14227) 947 : audit [DBG] from='client.16638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:56.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:56 vm01 bash[28222]: audit 2026-04-16T19:37:55.638684+0000 mgr.vm01.nwhpas (mgr.14227) 948 : audit [DBG] from='client.16642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:37:56.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:56 vm01 bash[28222]: audit 2026-04-16T19:37:55.872898+0000 mon.vm01 (mon.0) 1297 : audit [DBG] from='client.? 192.168.123.101:0/809502826' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:37:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:37:58 vm04 bash[34817]: cluster 2026-04-16T19:37:57.324124+0000 mgr.vm01.nwhpas (mgr.14227) 949 : cluster [DBG] pgmap v530: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:37:58.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:37:58 vm01 bash[28222]: cluster 2026-04-16T19:37:57.324124+0000 mgr.vm01.nwhpas (mgr.14227) 949 : cluster [DBG] pgmap v530: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:00.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:00 vm04 bash[34817]: cluster 2026-04-16T19:37:59.324537+0000 mgr.vm01.nwhpas (mgr.14227) 950 : cluster [DBG] pgmap v531: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:38:00.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:00 vm01 bash[28222]: cluster 2026-04-16T19:37:59.324537+0000 mgr.vm01.nwhpas (mgr.14227) 950 : cluster [DBG] pgmap v531: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:38:01.084 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:01.270 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:01.270 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 8m ago 14m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:01.270 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 14m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:01.270 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 14m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:01.270 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 14m - -
2026-04-16T19:38:01.505 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:01.505 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:01.505 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:01.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:01 vm04 bash[34817]: audit 2026-04-16T19:38:01.498847+0000 mon.vm01 (mon.0) 1298 : audit [DBG] from='client.? 192.168.123.101:0/1438100006' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:01.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:01 vm01 bash[28222]: audit 2026-04-16T19:38:01.498847+0000 mon.vm01 (mon.0) 1298 : audit [DBG] from='client.? 192.168.123.101:0/1438100006' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:02 vm04 bash[34817]: audit 2026-04-16T19:38:01.060721+0000 mgr.vm01.nwhpas (mgr.14227) 951 : audit [DBG] from='client.16650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:02 vm04 bash[34817]: audit 2026-04-16T19:38:01.260683+0000 mgr.vm01.nwhpas (mgr.14227) 952 : audit [DBG] from='client.16654 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:02.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:02 vm04 bash[34817]: cluster 2026-04-16T19:38:01.324984+0000 mgr.vm01.nwhpas (mgr.14227) 953 : cluster [DBG] pgmap v532: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:02.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:02 vm01 bash[28222]: audit 2026-04-16T19:38:01.060721+0000 mgr.vm01.nwhpas (mgr.14227) 951 : audit [DBG] from='client.16650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:02.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:02 vm01 bash[28222]: audit 2026-04-16T19:38:01.260683+0000 mgr.vm01.nwhpas (mgr.14227) 952 : audit [DBG] from='client.16654 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:02.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:02 vm01 bash[28222]: cluster 2026-04-16T19:38:01.324984+0000 mgr.vm01.nwhpas (mgr.14227) 953 : cluster [DBG] pgmap v532: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:04 vm04 bash[34817]: cluster 2026-04-16T19:38:03.325421+0000 mgr.vm01.nwhpas (mgr.14227) 954 : cluster [DBG] pgmap v533: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:04.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:04 vm01 bash[28222]: cluster 2026-04-16T19:38:03.325421+0000 mgr.vm01.nwhpas (mgr.14227) 954 : cluster [DBG] pgmap v533: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:06.718 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:06.899 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:06.899 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 8m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:06.899 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:06.899 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:06.899 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:06.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:06 vm04 bash[34817]: cluster 2026-04-16T19:38:05.326121+0000 mgr.vm01.nwhpas (mgr.14227) 955 : cluster [DBG] pgmap v534: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:06.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:06 vm01 bash[28222]: cluster 2026-04-16T19:38:05.326121+0000 mgr.vm01.nwhpas (mgr.14227) 955 : cluster [DBG] pgmap v534: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:07.133 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:07.133 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:07.133 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:07 vm04 bash[34817]: audit 2026-04-16T19:38:06.696267+0000 mgr.vm01.nwhpas (mgr.14227) 956 : audit [DBG] from='client.16662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:07 vm04 bash[34817]: audit 2026-04-16T19:38:06.890555+0000 mgr.vm01.nwhpas (mgr.14227) 957 : audit [DBG] from='client.16666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:07.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:07 vm04 bash[34817]: audit 2026-04-16T19:38:07.127472+0000 mon.vm01 (mon.0) 1299 : audit [DBG] from='client.? 192.168.123.101:0/3777968083' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:07.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:07 vm01 bash[28222]: audit 2026-04-16T19:38:06.696267+0000 mgr.vm01.nwhpas (mgr.14227) 956 : audit [DBG] from='client.16662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:07.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:07 vm01 bash[28222]: audit 2026-04-16T19:38:06.890555+0000 mgr.vm01.nwhpas (mgr.14227) 957 : audit [DBG] from='client.16666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:07.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:07 vm01 bash[28222]: audit 2026-04-16T19:38:07.127472+0000 mon.vm01 (mon.0) 1299 : audit [DBG] from='client.? 192.168.123.101:0/3777968083' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:08.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:08 vm04 bash[34817]: cluster 2026-04-16T19:38:07.326601+0000 mgr.vm01.nwhpas (mgr.14227) 958 : cluster [DBG] pgmap v535: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:08.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:08 vm04 bash[34817]: audit 2026-04-16T19:38:07.586793+0000 mon.vm01 (mon.0) 1300 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:38:08.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:08 vm01 bash[28222]: cluster 2026-04-16T19:38:07.326601+0000 mgr.vm01.nwhpas (mgr.14227) 958 : cluster [DBG] pgmap v535: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:08.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:08 vm01 bash[28222]: audit 2026-04-16T19:38:07.586793+0000 mon.vm01 (mon.0) 1300 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:38:10.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:10 vm04 bash[34817]: cluster 2026-04-16T19:38:09.327119+0000 mgr.vm01.nwhpas (mgr.14227) 959 : cluster [DBG] pgmap v536: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:10.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:10 vm01 bash[28222]: cluster 2026-04-16T19:38:09.327119+0000 mgr.vm01.nwhpas (mgr.14227) 959 : cluster [DBG] pgmap v536: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:12.330 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:12.507 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:12.507 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 8m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:12.507 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (8m) 8m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:12.507 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:12.507 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:12.728 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:12.728 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:12.728 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:12.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:12 vm04 bash[34817]: cluster 2026-04-16T19:38:11.327575+0000 mgr.vm01.nwhpas (mgr.14227) 960 : cluster [DBG] pgmap v537: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:12.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:12 vm01 bash[28222]: cluster 2026-04-16T19:38:11.327575+0000 mgr.vm01.nwhpas (mgr.14227) 960 : cluster [DBG] pgmap v537: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:13 vm04 bash[34817]: audit 2026-04-16T19:38:12.307950+0000 mgr.vm01.nwhpas (mgr.14227) 961 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:13 vm04 bash[34817]: audit 2026-04-16T19:38:12.498584+0000 mgr.vm01.nwhpas (mgr.14227) 962 : audit [DBG] from='client.16678 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:13.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:13 vm04 bash[34817]: audit 2026-04-16T19:38:12.722400+0000 mon.vm01 (mon.0) 1301 : audit [DBG] from='client.? 192.168.123.101:0/103805372' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:13.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:13 vm01 bash[28222]: audit 2026-04-16T19:38:12.307950+0000 mgr.vm01.nwhpas (mgr.14227) 961 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:13.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:13 vm01 bash[28222]: audit 2026-04-16T19:38:12.498584+0000 mgr.vm01.nwhpas (mgr.14227) 962 : audit [DBG] from='client.16678 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:13.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:13 vm01 bash[28222]: audit 2026-04-16T19:38:12.722400+0000 mon.vm01 (mon.0) 1301 : audit [DBG] from='client.? 192.168.123.101:0/103805372' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:14.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:14 vm04 bash[34817]: cluster 2026-04-16T19:38:13.327923+0000 mgr.vm01.nwhpas (mgr.14227) 963 : cluster [DBG] pgmap v538: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:14.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:14 vm01 bash[28222]: cluster 2026-04-16T19:38:13.327923+0000 mgr.vm01.nwhpas (mgr.14227) 963 : cluster [DBG] pgmap v538: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:16.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:16 vm04 bash[34817]: cluster 2026-04-16T19:38:15.328368+0000 mgr.vm01.nwhpas (mgr.14227) 964 : cluster [DBG] pgmap v539: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:16.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:16 vm01 bash[28222]: cluster 2026-04-16T19:38:15.328368+0000 mgr.vm01.nwhpas (mgr.14227) 964 : cluster [DBG] pgmap v539: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:17.944 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:18.130 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:18.130 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 8m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:18.130 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 8m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:18.130 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (3m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:18.130 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:18.360 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:18.360 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:18.360 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:18.680 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:18 vm04 bash[34817]: cluster 2026-04-16T19:38:17.328758+0000 mgr.vm01.nwhpas (mgr.14227) 965 : cluster [DBG] pgmap v540: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:18.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:18 vm04 bash[34817]: audit 2026-04-16T19:38:17.921327+0000 mgr.vm01.nwhpas (mgr.14227) 966 : audit [DBG] from='client.16686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:18.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:18 vm04 bash[34817]: audit 2026-04-16T19:38:18.354655+0000 mon.vm01 (mon.0) 1302 : audit [DBG] from='client.? 192.168.123.101:0/739791981' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:18.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:18 vm01 bash[28222]: cluster 2026-04-16T19:38:17.328758+0000 mgr.vm01.nwhpas (mgr.14227) 965 : cluster [DBG] pgmap v540: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:18.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:18 vm01 bash[28222]: audit 2026-04-16T19:38:17.921327+0000 mgr.vm01.nwhpas (mgr.14227) 966 : audit [DBG] from='client.16686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:18.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:18 vm01 bash[28222]: audit 2026-04-16T19:38:18.354655+0000 mon.vm01 (mon.0) 1302 : audit [DBG] from='client.? 192.168.123.101:0/739791981' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:19.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:19 vm04 bash[34817]: audit 2026-04-16T19:38:18.121311+0000 mgr.vm01.nwhpas (mgr.14227) 967 : audit [DBG] from='client.16690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:19.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:19 vm01 bash[28222]: audit 2026-04-16T19:38:18.121311+0000 mgr.vm01.nwhpas (mgr.14227) 967 : audit [DBG] from='client.16690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:20.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:20 vm04 bash[34817]: cluster 2026-04-16T19:38:19.329269+0000 mgr.vm01.nwhpas (mgr.14227) 968 : cluster [DBG] pgmap v541: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:20.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:20 vm01 bash[28222]: cluster 2026-04-16T19:38:19.329269+0000 mgr.vm01.nwhpas (mgr.14227) 968 : cluster [DBG] pgmap v541: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:22.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:22 vm04 bash[34817]: cluster 2026-04-16T19:38:21.329704+0000 mgr.vm01.nwhpas (mgr.14227) 969 : cluster [DBG] pgmap v542: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:22.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:22 vm04 bash[34817]: audit 2026-04-16T19:38:22.586933+0000 mon.vm01 (mon.0) 1303 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:38:22.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:22 vm01 bash[28222]: cluster 2026-04-16T19:38:21.329704+0000 mgr.vm01.nwhpas (mgr.14227) 969 : cluster [DBG] pgmap v542: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:22.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:22 vm01 bash[28222]: audit 2026-04-16T19:38:22.586933+0000 mon.vm01 (mon.0) 1303 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:38:23.565 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:23.755 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:23.755 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:23.755 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:23.755 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:23.755 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:23.977 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:23.977 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:23.977 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:24.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:24 vm04 bash[34817]: cluster 2026-04-16T19:38:23.330079+0000 mgr.vm01.nwhpas (mgr.14227) 970 : cluster [DBG] pgmap v543: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:24.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:24 vm04 bash[34817]: audit 2026-04-16T19:38:23.543080+0000 mgr.vm01.nwhpas (mgr.14227) 971 : audit [DBG] from='client.16698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:24.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:24 vm04 bash[34817]: audit 2026-04-16T19:38:23.746705+0000 mgr.vm01.nwhpas (mgr.14227) 972 : audit [DBG] from='client.16702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:24.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:24 vm04 bash[34817]: audit 2026-04-16T19:38:23.971451+0000 mon.vm01 (mon.0) 1304 : audit [DBG] from='client.? 192.168.123.101:0/1149346855' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:24.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:24 vm01 bash[28222]: cluster 2026-04-16T19:38:23.330079+0000 mgr.vm01.nwhpas (mgr.14227) 970 : cluster [DBG] pgmap v543: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:24.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:24 vm01 bash[28222]: audit 2026-04-16T19:38:23.543080+0000 mgr.vm01.nwhpas (mgr.14227) 971 : audit [DBG] from='client.16698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:24.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:24 vm01 bash[28222]: audit 2026-04-16T19:38:23.746705+0000 mgr.vm01.nwhpas (mgr.14227) 972 : audit [DBG] from='client.16702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:24.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:24 vm01 bash[28222]: audit 2026-04-16T19:38:23.971451+0000 mon.vm01 (mon.0) 1304 : audit [DBG] from='client.? 192.168.123.101:0/1149346855' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:26 vm01 bash[28222]: cluster 2026-04-16T19:38:25.330488+0000 mgr.vm01.nwhpas (mgr.14227) 973 : cluster [DBG] pgmap v544: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:26 vm04 bash[34817]: cluster 2026-04-16T19:38:25.330488+0000 mgr.vm01.nwhpas (mgr.14227) 973 : cluster [DBG] pgmap v544: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:29.182 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:29.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:28 vm04 bash[34817]: cluster 2026-04-16T19:38:27.330981+0000 mgr.vm01.nwhpas (mgr.14227) 974 : cluster [DBG] pgmap v545: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:29.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:28 vm01 bash[28222]: cluster 2026-04-16T19:38:27.330981+0000 mgr.vm01.nwhpas (mgr.14227) 974 : cluster [DBG] pgmap v545: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:29.360 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:29.360 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:29.360 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:29.360 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:29.360 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:29.584 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:29.584 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:29.585 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:30.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:29 vm04 bash[34817]: audit 2026-04-16T19:38:29.579055+0000 mon.vm04 (mon.1) 41 : audit [DBG] from='client.? 192.168.123.101:0/2202774964' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:30.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:29 vm01 bash[28222]: audit 2026-04-16T19:38:29.579055+0000 mon.vm04 (mon.1) 41 : audit [DBG] from='client.? 192.168.123.101:0/2202774964' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:31.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:30 vm04 bash[34817]: audit 2026-04-16T19:38:29.158783+0000 mgr.vm01.nwhpas (mgr.14227) 975 : audit [DBG] from='client.16710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:31.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:30 vm04 bash[34817]: cluster 2026-04-16T19:38:29.331472+0000 mgr.vm01.nwhpas (mgr.14227) 976 : cluster [DBG] pgmap v546: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:38:31.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:30 vm04 bash[34817]: audit 2026-04-16T19:38:29.351923+0000 mgr.vm01.nwhpas (mgr.14227) 977 : audit [DBG] from='client.16714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:31.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:30 vm01 bash[28222]: audit 2026-04-16T19:38:29.158783+0000 mgr.vm01.nwhpas (mgr.14227) 975 : audit [DBG] from='client.16710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:31.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:30 vm01 bash[28222]: cluster 2026-04-16T19:38:29.331472+0000 mgr.vm01.nwhpas (mgr.14227) 976 : cluster [DBG] pgmap v546: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:38:31.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:30 vm01 bash[28222]: audit 2026-04-16T19:38:29.351923+0000 mgr.vm01.nwhpas (mgr.14227) 977 : audit [DBG] from='client.16714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:33.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:32 vm04 bash[34817]: cluster 2026-04-16T19:38:31.331887+0000 mgr.vm01.nwhpas (mgr.14227) 978 : cluster [DBG] pgmap v547: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:33.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:32 vm01 bash[28222]: cluster 2026-04-16T19:38:31.331887+0000 mgr.vm01.nwhpas (mgr.14227) 978 : cluster [DBG] pgmap v547: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:34.793 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:34.974 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:34.974 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:34.974 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:34.974 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:34.974 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:35.207 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:35.207 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:35.208 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:35.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:34 vm04 bash[34817]: cluster 2026-04-16T19:38:33.332331+0000 mgr.vm01.nwhpas (mgr.14227) 979 : cluster [DBG] pgmap v548: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:35.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:34 vm01 bash[28222]: cluster 2026-04-16T19:38:33.332331+0000 mgr.vm01.nwhpas (mgr.14227) 979 : cluster [DBG] pgmap v548: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:38:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:35 vm04 bash[34817]: audit 2026-04-16T19:38:34.769798+0000 mgr.vm01.nwhpas (mgr.14227) 980 : audit [DBG] from='client.16722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:36.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:35 vm04 bash[34817]: audit 2026-04-16T19:38:35.201690+0000 mon.vm01 (mon.0) 1305 : audit [DBG] from='client.? 192.168.123.101:0/820265124' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:36.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:35 vm01 bash[28222]: audit 2026-04-16T19:38:34.769798+0000 mgr.vm01.nwhpas (mgr.14227) 980 : audit [DBG] from='client.16722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:36.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:35 vm01 bash[28222]: audit 2026-04-16T19:38:35.201690+0000 mon.vm01 (mon.0) 1305 : audit [DBG] from='client.? 192.168.123.101:0/820265124' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:36.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:35 vm01 bash[28222]: audit 2026-04-16T19:38:35.201690+0000 mon.vm01 (mon.0) 1305 : audit [DBG] from='client.?
192.168.123.101:0/820265124' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:36.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:36 vm01 bash[28222]: audit 2026-04-16T19:38:34.965923+0000 mgr.vm01.nwhpas (mgr.14227) 981 : audit [DBG] from='client.16726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:36.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:36 vm01 bash[28222]: audit 2026-04-16T19:38:34.965923+0000 mgr.vm01.nwhpas (mgr.14227) 981 : audit [DBG] from='client.16726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:36.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:36 vm01 bash[28222]: cluster 2026-04-16T19:38:35.332815+0000 mgr.vm01.nwhpas (mgr.14227) 982 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:38:36.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:36 vm01 bash[28222]: cluster 2026-04-16T19:38:35.332815+0000 mgr.vm01.nwhpas (mgr.14227) 982 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:38:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:36 vm04 bash[34817]: audit 2026-04-16T19:38:34.965923+0000 mgr.vm01.nwhpas (mgr.14227) 981 : audit [DBG] from='client.16726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:36 vm04 bash[34817]: audit 2026-04-16T19:38:34.965923+0000 mgr.vm01.nwhpas (mgr.14227) 981 : audit [DBG] from='client.16726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:36 vm04 bash[34817]: cluster 2026-04-16T19:38:35.332815+0000 mgr.vm01.nwhpas (mgr.14227) 982 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:38:37.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:36 vm04 bash[34817]: cluster 2026-04-16T19:38:35.332815+0000 mgr.vm01.nwhpas (mgr.14227) 982 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:38:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:37 vm04 bash[34817]: audit 2026-04-16T19:38:37.587257+0000 mon.vm01 (mon.0) 1306 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:38:38.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:37 vm04 bash[34817]: audit 2026-04-16T19:38:37.587257+0000 mon.vm01 (mon.0) 1306 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:38:38.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:37 vm01 bash[28222]: audit 2026-04-16T19:38:37.587257+0000 mon.vm01 (mon.0) 1306 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 
2026-04-16T19:38:39.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:38 vm04 bash[34817]: cluster 2026-04-16T19:38:37.333247+0000 mgr.vm01.nwhpas (mgr.14227) 983 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:39.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:38 vm01 bash[28222]: cluster 2026-04-16T19:38:37.333247+0000 mgr.vm01.nwhpas (mgr.14227) 983 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:40.430 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:40.629 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:38:40.629 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:38:40.629 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:38:40.629 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 3m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:38:40.629 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 3m ago 15m - -
2026-04-16T19:38:40.867 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:38:40.867 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:38:40.867 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:38:41.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:40 vm04 bash[34817]: cluster 2026-04-16T19:38:39.333677+0000 mgr.vm01.nwhpas (mgr.14227) 984 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:41.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:40 vm01 bash[28222]: cluster 2026-04-16T19:38:39.333677+0000 mgr.vm01.nwhpas (mgr.14227) 984 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:38:42.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:41 vm04 bash[34817]: audit 2026-04-16T19:38:40.407135+0000 mgr.vm01.nwhpas (mgr.14227) 985 : audit [DBG] from='client.16734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:42.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:41 vm04 bash[34817]: audit 2026-04-16T19:38:40.620315+0000 mgr.vm01.nwhpas (mgr.14227) 986 : audit [DBG] from='client.16738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:42.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:41 vm04 bash[34817]: audit 2026-04-16T19:38:40.861828+0000 mon.vm01 (mon.0) 1307 : audit [DBG] from='client.? 192.168.123.101:0/1967964662' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:42.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:41 vm01 bash[28222]: audit 2026-04-16T19:38:40.407135+0000 mgr.vm01.nwhpas (mgr.14227) 985 : audit [DBG] from='client.16734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:42.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:41 vm01 bash[28222]: audit 2026-04-16T19:38:40.620315+0000 mgr.vm01.nwhpas (mgr.14227) 986 : audit [DBG] from='client.16738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:38:42.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:41 vm01 bash[28222]: audit 2026-04-16T19:38:40.861828+0000 mon.vm01 (mon.0) 1307 : audit [DBG] from='client.? 192.168.123.101:0/1967964662' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:38:43.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:42 vm04 bash[34817]: cluster 2026-04-16T19:38:41.334051+0000 mgr.vm01.nwhpas (mgr.14227) 987 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:43.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:42 vm01 bash[28222]: cluster 2026-04-16T19:38:41.334051+0000 mgr.vm01.nwhpas (mgr.14227) 987 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:43 vm04 bash[34817]: audit 2026-04-16T19:38:43.343889+0000 mon.vm01 (mon.0) 1308 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:38:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:43 vm04 bash[34817]: audit 2026-04-16T19:38:43.670066+0000 mon.vm01 (mon.0) 1309 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:38:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:43 vm04 bash[34817]: audit 2026-04-16T19:38:43.670535+0000 mon.vm01 (mon.0) 1310 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:38:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:43 vm04 bash[34817]: audit 2026-04-16T19:38:43.675518+0000 mon.vm01 (mon.0) 1311 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:38:44.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:43 vm04 bash[34817]: audit 2026-04-16T19:38:43.676829+0000 mon.vm01 (mon.0) 1312 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:38:44.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:43 vm01 bash[28222]: audit 2026-04-16T19:38:43.343889+0000 mon.vm01 (mon.0) 1308 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:38:44.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:43 vm01 bash[28222]: audit 2026-04-16T19:38:43.670066+0000 mon.vm01 (mon.0) 1309 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:38:44.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:43 vm01 bash[28222]: audit 2026-04-16T19:38:43.670535+0000 mon.vm01 (mon.0) 1310 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:38:44.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:43 vm01 bash[28222]: audit 2026-04-16T19:38:43.675518+0000 mon.vm01 (mon.0) 1311 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:38:44.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:43 vm01 bash[28222]: audit 2026-04-16T19:38:43.676829+0000 mon.vm01 (mon.0) 1312 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:38:45.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:44 vm04 bash[34817]: cluster 2026-04-16T19:38:43.334382+0000 mgr.vm01.nwhpas (mgr.14227) 988 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:45.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:44 vm04 bash[34817]: cluster 2026-04-16T19:38:43.671274+0000 mgr.vm01.nwhpas (mgr.14227) 989 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:45.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:44 vm04 bash[34817]: cluster 2026-04-16T19:38:43.671360+0000 mgr.vm01.nwhpas (mgr.14227) 990 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:45.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:44 vm01 bash[28222]: cluster 2026-04-16T19:38:43.334382+0000 mgr.vm01.nwhpas (mgr.14227) 988 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:45.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:44 vm01 bash[28222]: cluster 2026-04-16T19:38:43.671274+0000 mgr.vm01.nwhpas (mgr.14227) 989 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:45.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:44 vm01 bash[28222]: cluster 2026-04-16T19:38:43.671360+0000 mgr.vm01.nwhpas (mgr.14227) 990 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:38:46.076 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:38:46.251 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:38:46.251 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:38:46.251 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:38:46.251 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:38:46.251 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 15m - - 2026-04-16T19:38:46.467 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:38:46.467 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:38:46.467 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:38:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:46 vm01 bash[28222]: cluster 2026-04-16T19:38:45.671812+0000 mgr.vm01.nwhpas (mgr.14227) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:46 vm01 bash[28222]: cluster 2026-04-16T19:38:45.671812+0000 mgr.vm01.nwhpas (mgr.14227) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:46 vm01 bash[28222]: audit 2026-04-16T19:38:46.461533+0000 mon.vm01 (mon.0) 1313 : audit [DBG] from='client.? 192.168.123.101:0/3961836595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:46 vm01 bash[28222]: audit 2026-04-16T19:38:46.461533+0000 mon.vm01 (mon.0) 1313 : audit [DBG] from='client.? 192.168.123.101:0/3961836595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:46 vm04 bash[34817]: cluster 2026-04-16T19:38:45.671812+0000 mgr.vm01.nwhpas (mgr.14227) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:46 vm04 bash[34817]: cluster 2026-04-16T19:38:45.671812+0000 mgr.vm01.nwhpas (mgr.14227) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:46 vm04 bash[34817]: audit 2026-04-16T19:38:46.461533+0000 mon.vm01 (mon.0) 1313 : audit [DBG] from='client.? 192.168.123.101:0/3961836595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:46 vm04 bash[34817]: audit 2026-04-16T19:38:46.461533+0000 mon.vm01 (mon.0) 1313 : audit [DBG] from='client.? 
192.168.123.101:0/3961836595' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:47 vm04 bash[34817]: audit 2026-04-16T19:38:46.052720+0000 mgr.vm01.nwhpas (mgr.14227) 992 : audit [DBG] from='client.16746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:47 vm04 bash[34817]: audit 2026-04-16T19:38:46.052720+0000 mgr.vm01.nwhpas (mgr.14227) 992 : audit [DBG] from='client.16746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:47 vm04 bash[34817]: audit 2026-04-16T19:38:46.242580+0000 mgr.vm01.nwhpas (mgr.14227) 993 : audit [DBG] from='client.16750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:47 vm04 bash[34817]: audit 2026-04-16T19:38:46.242580+0000 mgr.vm01.nwhpas (mgr.14227) 993 : audit [DBG] from='client.16750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:47 vm01 bash[28222]: audit 2026-04-16T19:38:46.052720+0000 mgr.vm01.nwhpas (mgr.14227) 992 : audit [DBG] from='client.16746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:47 vm01 bash[28222]: audit 2026-04-16T19:38:46.052720+0000 mgr.vm01.nwhpas (mgr.14227) 992 : audit [DBG] from='client.16746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:47 vm01 bash[28222]: audit 2026-04-16T19:38:46.242580+0000 mgr.vm01.nwhpas (mgr.14227) 993 : audit [DBG] from='client.16750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:48.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:47 vm01 bash[28222]: audit 2026-04-16T19:38:46.242580+0000 mgr.vm01.nwhpas (mgr.14227) 993 : audit [DBG] from='client.16750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:49.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:48 vm04 bash[34817]: cluster 2026-04-16T19:38:47.672209+0000 mgr.vm01.nwhpas (mgr.14227) 994 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:49.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:48 vm04 bash[34817]: cluster 2026-04-16T19:38:47.672209+0000 mgr.vm01.nwhpas (mgr.14227) 994 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:49.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:48 vm01 bash[28222]: cluster 2026-04-16T19:38:47.672209+0000 mgr.vm01.nwhpas (mgr.14227) 994 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:49.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:48 vm01 bash[28222]: cluster 2026-04-16T19:38:47.672209+0000 mgr.vm01.nwhpas (mgr.14227) 994 : 
cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-16T19:38:51.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:50 vm04 bash[34817]: cluster 2026-04-16T19:38:49.672674+0000 mgr.vm01.nwhpas (mgr.14227) 995 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:51.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:50 vm04 bash[34817]: cluster 2026-04-16T19:38:49.672674+0000 mgr.vm01.nwhpas (mgr.14227) 995 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:51.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:50 vm01 bash[28222]: cluster 2026-04-16T19:38:49.672674+0000 mgr.vm01.nwhpas (mgr.14227) 995 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:51.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:50 vm01 bash[28222]: cluster 2026-04-16T19:38:49.672674+0000 mgr.vm01.nwhpas (mgr.14227) 995 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:51.668 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:38:51.842 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:38:51.842 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (14m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:38:51.842 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:38:51.842 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:38:51.842 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 15m - - 2026-04-16T19:38:52.057 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:38:52.057 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:38:52.057 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:51.645207+0000 mgr.vm01.nwhpas (mgr.14227) 996 : audit [DBG] from='client.25815 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:51.645207+0000 mgr.vm01.nwhpas (mgr.14227) 996 : audit [DBG] from='client.25815 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: cluster 2026-04-16T19:38:51.673076+0000 mgr.vm01.nwhpas (mgr.14227) 997 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:53.211 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: cluster 2026-04-16T19:38:51.673076+0000 mgr.vm01.nwhpas (mgr.14227) 997 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:51.833956+0000 mgr.vm01.nwhpas (mgr.14227) 998 : audit [DBG] from='client.16762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:51.833956+0000 mgr.vm01.nwhpas (mgr.14227) 998 : audit [DBG] from='client.16762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:52.051331+0000 mon.vm01 (mon.0) 1314 : audit [DBG] from='client.? 192.168.123.101:0/2686707693' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:52.051331+0000 mon.vm01 (mon.0) 1314 : audit [DBG] from='client.? 192.168.123.101:0/2686707693' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:52.587362+0000 mon.vm01 (mon.0) 1315 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:38:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:52 vm04 bash[34817]: audit 2026-04-16T19:38:52.587362+0000 mon.vm01 (mon.0) 1315 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:38:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:51.645207+0000 mgr.vm01.nwhpas (mgr.14227) 996 : audit [DBG] from='client.25815 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:51.645207+0000 mgr.vm01.nwhpas (mgr.14227) 996 : audit [DBG] from='client.25815 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: cluster 2026-04-16T19:38:51.673076+0000 mgr.vm01.nwhpas (mgr.14227) 997 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: cluster 2026-04-16T19:38:51.673076+0000 mgr.vm01.nwhpas (mgr.14227) 997 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 245 B/s wr, 0 op/s 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:51.833956+0000 mgr.vm01.nwhpas (mgr.14227) 998 : audit [DBG] from='client.16762 -' 
entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:51.833956+0000 mgr.vm01.nwhpas (mgr.14227) 998 : audit [DBG] from='client.16762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:52.051331+0000 mon.vm01 (mon.0) 1314 : audit [DBG] from='client.? 192.168.123.101:0/2686707693' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:52.051331+0000 mon.vm01 (mon.0) 1314 : audit [DBG] from='client.? 192.168.123.101:0/2686707693' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:52.587362+0000 mon.vm01 (mon.0) 1315 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:38:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:52 vm01 bash[28222]: audit 2026-04-16T19:38:52.587362+0000 mon.vm01 (mon.0) 1315 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:38:55.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:54 vm04 bash[34817]: cluster 2026-04-16T19:38:53.673444+0000 mgr.vm01.nwhpas (mgr.14227) 999 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:38:55.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:54 vm04 bash[34817]: cluster 2026-04-16T19:38:53.673444+0000 mgr.vm01.nwhpas (mgr.14227) 999 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:38:55.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:54 vm01 bash[28222]: cluster 2026-04-16T19:38:53.673444+0000 mgr.vm01.nwhpas (mgr.14227) 999 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:38:55.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:54 vm01 bash[28222]: cluster 2026-04-16T19:38:53.673444+0000 mgr.vm01.nwhpas (mgr.14227) 999 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-16T19:38:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:56 vm04 bash[34817]: cluster 2026-04-16T19:38:55.673864+0000 mgr.vm01.nwhpas (mgr.14227) 1000 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:57.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:56 vm04 bash[34817]: cluster 2026-04-16T19:38:55.673864+0000 mgr.vm01.nwhpas (mgr.14227) 1000 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:57.214 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:56 vm01 bash[28222]: cluster 2026-04-16T19:38:55.673864+0000 mgr.vm01.nwhpas (mgr.14227) 1000 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:57.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:56 vm01 bash[28222]: cluster 2026-04-16T19:38:55.673864+0000 mgr.vm01.nwhpas (mgr.14227) 1000 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:57.266 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:38:57.442 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:38:57.442 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:38:57.442 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:38:57.442 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:38:57.442 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 15m - - 2026-04-16T19:38:57.661 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:38:57.661 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:38:57.661 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:38:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:57 vm04 bash[34817]: audit 2026-04-16T19:38:57.655105+0000 mon.vm01 (mon.0) 1316 : audit [DBG] from='client.? 192.168.123.101:0/58834956' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:57 vm04 bash[34817]: audit 2026-04-16T19:38:57.655105+0000 mon.vm01 (mon.0) 1316 : audit [DBG] from='client.? 192.168.123.101:0/58834956' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:57 vm01 bash[28222]: audit 2026-04-16T19:38:57.655105+0000 mon.vm01 (mon.0) 1316 : audit [DBG] from='client.? 192.168.123.101:0/58834956' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:57 vm01 bash[28222]: audit 2026-04-16T19:38:57.655105+0000 mon.vm01 (mon.0) 1316 : audit [DBG] from='client.? 
192.168.123.101:0/58834956' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:38:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:58 vm04 bash[34817]: audit 2026-04-16T19:38:57.243650+0000 mgr.vm01.nwhpas (mgr.14227) 1001 : audit [DBG] from='client.16770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:58 vm04 bash[34817]: audit 2026-04-16T19:38:57.243650+0000 mgr.vm01.nwhpas (mgr.14227) 1001 : audit [DBG] from='client.16770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:58 vm04 bash[34817]: audit 2026-04-16T19:38:57.433707+0000 mgr.vm01.nwhpas (mgr.14227) 1002 : audit [DBG] from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:58 vm04 bash[34817]: audit 2026-04-16T19:38:57.433707+0000 mgr.vm01.nwhpas (mgr.14227) 1002 : audit [DBG] from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:58 vm04 bash[34817]: cluster 2026-04-16T19:38:57.674229+0000 mgr.vm01.nwhpas (mgr.14227) 1003 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:59.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:38:58 vm04 bash[34817]: cluster 2026-04-16T19:38:57.674229+0000 mgr.vm01.nwhpas (mgr.14227) 1003 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:58 vm01 bash[28222]: audit 2026-04-16T19:38:57.243650+0000 mgr.vm01.nwhpas (mgr.14227) 1001 : audit [DBG] from='client.16770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:58 vm01 bash[28222]: audit 2026-04-16T19:38:57.243650+0000 mgr.vm01.nwhpas (mgr.14227) 1001 : audit [DBG] from='client.16770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:58 vm01 bash[28222]: audit 2026-04-16T19:38:57.433707+0000 mgr.vm01.nwhpas (mgr.14227) 1002 : audit [DBG] from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:58 vm01 bash[28222]: audit 2026-04-16T19:38:57.433707+0000 mgr.vm01.nwhpas (mgr.14227) 1002 : audit [DBG] from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:38:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:38:58 vm01 bash[28222]: cluster 2026-04-16T19:38:57.674229+0000 mgr.vm01.nwhpas (mgr.14227) 1003 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:38:59.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 
19:38:58 vm01 bash[28222]: cluster 2026-04-16T19:38:57.674229+0000 mgr.vm01.nwhpas (mgr.14227) 1003 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:01.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:00 vm04 bash[34817]: cluster 2026-04-16T19:38:59.674679+0000 mgr.vm01.nwhpas (mgr.14227) 1004 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 511 B/s wr, 7 op/s 2026-04-16T19:39:01.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:00 vm04 bash[34817]: cluster 2026-04-16T19:38:59.674679+0000 mgr.vm01.nwhpas (mgr.14227) 1004 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 511 B/s wr, 7 op/s 2026-04-16T19:39:01.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:00 vm01 bash[28222]: cluster 2026-04-16T19:38:59.674679+0000 mgr.vm01.nwhpas (mgr.14227) 1004 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 511 B/s wr, 7 op/s 2026-04-16T19:39:01.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:00 vm01 bash[28222]: cluster 2026-04-16T19:38:59.674679+0000 mgr.vm01.nwhpas (mgr.14227) 1004 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 511 B/s wr, 7 op/s 2026-04-16T19:39:02.858 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop 2026-04-16T19:39:03.031 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:39:03.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 9m ago 15m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a 2026-04-16T19:39:03.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 15m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b 2026-04-16T19:39:03.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 15m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122 2026-04-16T19:39:03.031 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - - 2026-04-16T19:39:03.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:02 vm04 bash[34817]: cluster 2026-04-16T19:39:01.675147+0000 mgr.vm01.nwhpas (mgr.14227) 1005 : cluster [DBG] pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s 2026-04-16T19:39:03.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:02 vm04 bash[34817]: cluster 2026-04-16T19:39:01.675147+0000 mgr.vm01.nwhpas (mgr.14227) 1005 : cluster [DBG] pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s 2026-04-16T19:39:03.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:02 vm01 bash[28222]: cluster 2026-04-16T19:39:01.675147+0000 mgr.vm01.nwhpas (mgr.14227) 1005 : cluster [DBG] pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s 2026-04-16T19:39:03.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:02 vm01 bash[28222]: cluster 2026-04-16T19:39:01.675147+0000 mgr.vm01.nwhpas (mgr.14227) 1005 : cluster [DBG] pgmap v564: 
129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s 2026-04-16T19:39:03.248 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-16T19:39:03.248 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-16T19:39:03.248 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state 2026-04-16T19:39:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:03 vm04 bash[34817]: audit 2026-04-16T19:39:02.830282+0000 mgr.vm01.nwhpas (mgr.14227) 1006 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:03 vm04 bash[34817]: audit 2026-04-16T19:39:02.830282+0000 mgr.vm01.nwhpas (mgr.14227) 1006 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:03 vm04 bash[34817]: audit 2026-04-16T19:39:03.242336+0000 mon.vm01 (mon.0) 1317 : audit [DBG] from='client.? 192.168.123.101:0/462062276' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:04.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:03 vm04 bash[34817]: audit 2026-04-16T19:39:03.242336+0000 mon.vm01 (mon.0) 1317 : audit [DBG] from='client.? 192.168.123.101:0/462062276' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:03 vm01 bash[28222]: audit 2026-04-16T19:39:02.830282+0000 mgr.vm01.nwhpas (mgr.14227) 1006 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:03 vm01 bash[28222]: audit 2026-04-16T19:39:02.830282+0000 mgr.vm01.nwhpas (mgr.14227) 1006 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:03 vm01 bash[28222]: audit 2026-04-16T19:39:03.242336+0000 mon.vm01 (mon.0) 1317 : audit [DBG] from='client.? 192.168.123.101:0/462062276' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:04.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:03 vm01 bash[28222]: audit 2026-04-16T19:39:03.242336+0000 mon.vm01 (mon.0) 1317 : audit [DBG] from='client.? 
2026-04-16T19:39:05.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:04 vm04 bash[34817]: audit 2026-04-16T19:39:03.021478+0000 mgr.vm01.nwhpas (mgr.14227) 1007 : audit [DBG] from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:05.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:04 vm04 bash[34817]: cluster 2026-04-16T19:39:03.675568+0000 mgr.vm01.nwhpas (mgr.14227) 1008 : cluster [DBG] pgmap v565: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-16T19:39:05.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:04 vm01 bash[28222]: audit 2026-04-16T19:39:03.021478+0000 mgr.vm01.nwhpas (mgr.14227) 1007 : audit [DBG] from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:05.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:04 vm01 bash[28222]: cluster 2026-04-16T19:39:03.675568+0000 mgr.vm01.nwhpas (mgr.14227) 1008 : cluster [DBG] pgmap v565: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-16T19:39:07.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:06 vm04 bash[34817]: cluster 2026-04-16T19:39:05.675967+0000 mgr.vm01.nwhpas (mgr.14227) 1009 : cluster [DBG] pgmap v566: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 16 op/s
2026-04-16T19:39:07.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:06 vm01 bash[28222]: cluster 2026-04-16T19:39:05.675967+0000 mgr.vm01.nwhpas (mgr.14227) 1009 : cluster [DBG] pgmap v566: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 16 op/s
2026-04-16T19:39:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:07 vm04 bash[34817]: audit 2026-04-16T19:39:07.587544+0000 mon.vm01 (mon.0) 1318 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:39:08.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:07 vm01 bash[28222]: audit 2026-04-16T19:39:07.587544+0000 mon.vm01 (mon.0) 1318 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:39:08.438 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:39:08.611 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:08.611 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 9m ago 16m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:39:08.611 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (9m) 9m ago 16m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:39:08.611 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 16m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:39:08.611 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - -
2026-04-16T19:39:08.827 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:39:08.827 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:39:08.827 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:39:09.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:08 vm04 bash[34817]: cluster 2026-04-16T19:39:07.676341+0000 mgr.vm01.nwhpas (mgr.14227) 1010 : cluster [DBG] pgmap v567: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 16 op/s
2026-04-16T19:39:09.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:08 vm04 bash[34817]: audit 2026-04-16T19:39:08.821337+0000 mon.vm01 (mon.0) 1319 : audit [DBG] from='client.? 192.168.123.101:0/1437077993' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:09.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:08 vm01 bash[28222]: cluster 2026-04-16T19:39:07.676341+0000 mgr.vm01.nwhpas (mgr.14227) 1010 : cluster [DBG] pgmap v567: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 16 op/s
2026-04-16T19:39:09.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:08 vm01 bash[28222]: audit 2026-04-16T19:39:08.821337+0000 mon.vm01 (mon.0) 1319 : audit [DBG] from='client.? 192.168.123.101:0/1437077993' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:10.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:09 vm04 bash[34817]: audit 2026-04-16T19:39:08.416900+0000 mgr.vm01.nwhpas (mgr.14227) 1011 : audit [DBG] from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:10.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:09 vm04 bash[34817]: audit 2026-04-16T19:39:08.602812+0000 mgr.vm01.nwhpas (mgr.14227) 1012 : audit [DBG] from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:10.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:09 vm01 bash[28222]: audit 2026-04-16T19:39:08.416900+0000 mgr.vm01.nwhpas (mgr.14227) 1011 : audit [DBG] from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:10.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:09 vm01 bash[28222]: audit 2026-04-16T19:39:08.602812+0000 mgr.vm01.nwhpas (mgr.14227) 1012 : audit [DBG] from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:11.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:10 vm04 bash[34817]: cluster 2026-04-16T19:39:09.676751+0000 mgr.vm01.nwhpas (mgr.14227) 1013 : cluster [DBG] pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 16 op/s
2026-04-16T19:39:11.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:10 vm01 bash[28222]: cluster 2026-04-16T19:39:09.676751+0000 mgr.vm01.nwhpas (mgr.14227) 1013 : cluster [DBG] pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 16 op/s
2026-04-16T19:39:13.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:12 vm04 bash[34817]: cluster 2026-04-16T19:39:11.677160+0000 mgr.vm01.nwhpas (mgr.14227) 1014 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 4.7 KiB/s rd, 0 B/s wr, 9 op/s
2026-04-16T19:39:13.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:12 vm01 bash[28222]: cluster 2026-04-16T19:39:11.677160+0000 mgr.vm01.nwhpas (mgr.14227) 1014 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 4.7 KiB/s rd, 0 B/s wr, 9 op/s
2026-04-16T19:39:14.061 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:39:14.233 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:14.233 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 9m ago 16m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:39:14.233 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (10m) 9m ago 16m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:39:14.233 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 16m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:39:14.233 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - -
2026-04-16T19:39:14.491 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:39:14.491 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:39:14.491 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:39:15.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:14 vm04 bash[34817]: cluster 2026-04-16T19:39:13.677539+0000 mgr.vm01.nwhpas (mgr.14227) 1015 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:39:15.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:14 vm04 bash[34817]: audit 2026-04-16T19:39:14.485210+0000 mon.vm01 (mon.0) 1320 : audit [DBG] from='client.? 192.168.123.101:0/978771570' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:15.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:14 vm01 bash[28222]: cluster 2026-04-16T19:39:13.677539+0000 mgr.vm01.nwhpas (mgr.14227) 1015 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-16T19:39:15.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:14 vm01 bash[28222]: audit 2026-04-16T19:39:14.485210+0000 mon.vm01 (mon.0) 1320 : audit [DBG] from='client.? 192.168.123.101:0/978771570' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:16.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:15 vm04 bash[34817]: audit 2026-04-16T19:39:14.038971+0000 mgr.vm01.nwhpas (mgr.14227) 1016 : audit [DBG] from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:16.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:15 vm04 bash[34817]: audit 2026-04-16T19:39:14.224565+0000 mgr.vm01.nwhpas (mgr.14227) 1017 : audit [DBG] from='client.16810 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:16.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:15 vm01 bash[28222]: audit 2026-04-16T19:39:14.038971+0000 mgr.vm01.nwhpas (mgr.14227) 1016 : audit [DBG] from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:16.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:15 vm01 bash[28222]: audit 2026-04-16T19:39:14.224565+0000 mgr.vm01.nwhpas (mgr.14227) 1017 : audit [DBG] from='client.16810 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:17.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:16 vm04 bash[34817]: cluster 2026-04-16T19:39:15.677967+0000 mgr.vm01.nwhpas (mgr.14227) 1018 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 0 B/s wr, 12 op/s
2026-04-16T19:39:17.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:16 vm01 bash[28222]: cluster 2026-04-16T19:39:15.677967+0000 mgr.vm01.nwhpas (mgr.14227) 1018 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 0 B/s wr, 12 op/s
2026-04-16T19:39:19.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:18 vm04 bash[34817]: cluster 2026-04-16T19:39:17.678340+0000 mgr.vm01.nwhpas (mgr.14227) 1019 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 30 op/s
2026-04-16T19:39:19.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:18 vm01 bash[28222]: cluster 2026-04-16T19:39:17.678340+0000 mgr.vm01.nwhpas (mgr.14227) 1019 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 30 op/s
2026-04-16T19:39:19.685 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:39:19.852 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:19.852 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 10m ago 16m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:39:19.852 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (10m) 10m ago 16m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:39:19.852 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (4m) 4m ago 16m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:39:19.852 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - -
2026-04-16T19:39:20.070 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:39:20.070 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:39:20.070 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:39:21.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:20 vm04 bash[34817]: audit 2026-04-16T19:39:19.664350+0000 mgr.vm01.nwhpas (mgr.14227) 1020 : audit [DBG] from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:21.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:20 vm04 bash[34817]: cluster 2026-04-16T19:39:19.678677+0000 mgr.vm01.nwhpas (mgr.14227) 1021 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-16T19:39:21.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:20 vm04 bash[34817]: audit 2026-04-16T19:39:19.843812+0000 mgr.vm01.nwhpas (mgr.14227) 1022 : audit [DBG] from='client.16822 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:21.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:20 vm04 bash[34817]: audit 2026-04-16T19:39:20.064356+0000 mon.vm01 (mon.0) 1321 : audit [DBG] from='client.? 192.168.123.101:0/1037373813' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:21.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:20 vm01 bash[28222]: audit 2026-04-16T19:39:19.664350+0000 mgr.vm01.nwhpas (mgr.14227) 1020 : audit [DBG] from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:21.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:20 vm01 bash[28222]: cluster 2026-04-16T19:39:19.678677+0000 mgr.vm01.nwhpas (mgr.14227) 1021 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-16T19:39:21.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:20 vm01 bash[28222]: audit 2026-04-16T19:39:19.843812+0000 mgr.vm01.nwhpas (mgr.14227) 1022 : audit [DBG] from='client.16822 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:21.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:20 vm01 bash[28222]: audit 2026-04-16T19:39:20.064356+0000 mon.vm01 (mon.0) 1321 : audit [DBG] from='client.? 192.168.123.101:0/1037373813' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:22.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:21 vm04 bash[34817]: cluster 2026-04-16T19:39:21.679056+0000 mgr.vm01.nwhpas (mgr.14227) 1023 : cluster [DBG] pgmap v574: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-16T19:39:22.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:21 vm01 bash[28222]: cluster 2026-04-16T19:39:21.679056+0000 mgr.vm01.nwhpas (mgr.14227) 1023 : cluster [DBG] pgmap v574: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-16T19:39:23.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:22 vm04 bash[34817]: audit 2026-04-16T19:39:22.587893+0000 mon.vm01 (mon.0) 1322 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:39:23.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:22 vm01 bash[28222]: audit 2026-04-16T19:39:22.587893+0000 mon.vm01 (mon.0) 1322 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:39:24.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:23 vm04 bash[34817]: cluster 2026-04-16T19:39:23.679417+0000 mgr.vm01.nwhpas (mgr.14227) 1024 : cluster [DBG] pgmap v575: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:39:24.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:23 vm01 bash[28222]: cluster 2026-04-16T19:39:23.679417+0000 mgr.vm01.nwhpas (mgr.14227) 1024 : cluster [DBG] pgmap v575: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:39:25.260 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to stop
2026-04-16T19:39:25.435 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:25.436 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 10m ago 16m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:39:25.436 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (10m) 10m ago 16m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:39:25.436 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 4m ago 16m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:39:25.436 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - -
2026-04-16T19:39:25.594 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:39:25.594 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:39:25.594 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-16T19:39:25.779 INFO:teuthology.orchestra.run.vm01.stdout:anonymousScheduled to start rgw.foo.vm04.uxumrv on host 'vm04'
2026-04-16T19:39:25.978 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to start
2026-04-16T19:39:26.170 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:26.170 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 10m ago 16m 116M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:39:26.170 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (10m) 10m ago 16m 92.4M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:39:26.170 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 4m ago 16m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:39:26.170 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - -
2026-04-16T19:39:26.391 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:39:26.391 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:39:26.391 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.239065+0000 mgr.vm01.nwhpas (mgr.14227) 1025 : audit [DBG] from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.427126+0000 mgr.vm01.nwhpas (mgr.14227) 1026 : audit [DBG] from='client.16834 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: cluster 2026-04-16T19:39:25.679805+0000 mgr.vm01.nwhpas (mgr.14227) 1027 : cluster [DBG] pgmap v576: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.763693+0000 mgr.vm01.nwhpas (mgr.14227) 1028 : audit [DBG] from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.uxumrv", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: cephadm 2026-04-16T19:39:25.764023+0000 mgr.vm01.nwhpas (mgr.14227) 1029 : cephadm [INF] Schedule start daemon rgw.foo.vm04.uxumrv
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.768772+0000 mon.vm01 (mon.0) 1323 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.772860+0000 mon.vm01 (mon.0) 1324 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.773445+0000 mon.vm01 (mon.0) 1325 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:25.957218+0000 mgr.vm01.nwhpas (mgr.14227) 1030 : audit [DBG] from='client.16846 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:26.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:26 vm01 bash[28222]: audit 2026-04-16T19:39:26.385066+0000 mon.vm01 (mon.0) 1326 : audit [DBG] from='client.? 192.168.123.101:0/473446999' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.239065+0000 mgr.vm01.nwhpas (mgr.14227) 1025 : audit [DBG] from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.427126+0000 mgr.vm01.nwhpas (mgr.14227) 1026 : audit [DBG] from='client.16834 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: cluster 2026-04-16T19:39:25.679805+0000 mgr.vm01.nwhpas (mgr.14227) 1027 : cluster [DBG] pgmap v576: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.763693+0000 mgr.vm01.nwhpas (mgr.14227) 1028 : audit [DBG] from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm04.uxumrv", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: cephadm 2026-04-16T19:39:25.764023+0000 mgr.vm01.nwhpas (mgr.14227) 1029 : cephadm [INF] Schedule start daemon rgw.foo.vm04.uxumrv
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.768772+0000 mon.vm01 (mon.0) 1323 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.772860+0000 mon.vm01 (mon.0) 1324 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.773445+0000 mon.vm01 (mon.0) 1325 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:25.957218+0000 mgr.vm01.nwhpas (mgr.14227) 1030 : audit [DBG] from='client.16846 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:27.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:26 vm04 bash[34817]: audit 2026-04-16T19:39:26.385066+0000 mon.vm01 (mon.0) 1326 : audit [DBG] from='client.? 192.168.123.101:0/473446999' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:28.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:27 vm04 bash[34817]: audit 2026-04-16T19:39:26.161522+0000 mgr.vm01.nwhpas (mgr.14227) 1031 : audit [DBG] from='client.25885 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:28.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:27 vm01 bash[28222]: audit 2026-04-16T19:39:26.161522+0000 mgr.vm01.nwhpas (mgr.14227) 1031 : audit [DBG] from='client.25885 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:29.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:28 vm04 bash[34817]: cluster 2026-04-16T19:39:27.680192+0000 mgr.vm01.nwhpas (mgr.14227) 1032 : cluster [DBG] pgmap v577: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 341 B/s wr, 47 op/s
2026-04-16T19:39:29.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:28 vm01 bash[28222]: cluster 2026-04-16T19:39:27.680192+0000 mgr.vm01.nwhpas (mgr.14227) 1032 : cluster [DBG] pgmap v577: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 341 B/s wr, 47 op/s
2026-04-16T19:39:31.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:30 vm04 bash[34817]: cluster 2026-04-16T19:39:29.680586+0000 mgr.vm01.nwhpas (mgr.14227) 1033 : cluster [DBG] pgmap v578: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 511 B/s wr, 29 op/s
2026-04-16T19:39:31.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:30 vm01 bash[28222]: cluster 2026-04-16T19:39:29.680586+0000 mgr.vm01.nwhpas (mgr.14227) 1033 : cluster [DBG] pgmap v578: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 511 B/s wr, 29 op/s
2026-04-16T19:39:31.593 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for rgw.foo.vm04.uxumrv to start
2026-04-16T19:39:31.787 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:31.788 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (15m) 0s ago 16m 135M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:39:31.788 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (10m) 0s ago 16m 127M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:39:31.788 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 4m ago 16m 94.9M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:39:31.788 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 error 4m ago 16m - -
2026-04-16T19:39:32.093 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-16T19:39:32.093 INFO:teuthology.orchestra.run.vm01.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-16T19:39:32.093 INFO:teuthology.orchestra.run.vm01.stdout: daemon rgw.foo.vm04.uxumrv on vm04 is in error state
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.001414+0000 mon.vm01 (mon.0) 1327 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.006955+0000 mon.vm01 (mon.0) 1328 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.007828+0000 mon.vm01 (mon.0) 1329 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.008273+0000 mon.vm01 (mon.0) 1330 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: cluster 2026-04-16T19:39:31.009275+0000 mgr.vm01.nwhpas (mgr.14227) 1034 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 180 B/s rd, 361 B/s wr, 0 op/s
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: cluster 2026-04-16T19:39:31.009391+0000 mgr.vm01.nwhpas (mgr.14227) 1035 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 219 B/s rd, 438 B/s wr, 0 op/s
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.012242+0000 mon.vm01 (mon.0) 1331 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.014008+0000 mon.vm01 (mon.0) 1332 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.564429+0000 mon.vm01 (mon.0) 1333 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:32.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.569352+0000 mon.vm01 (mon.0) 1334 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.570036+0000 mgr.vm01.nwhpas (mgr.14227) 1036 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.570036+0000 mgr.vm01.nwhpas (mgr.14227) 1036 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.570133+0000 mon.vm01 (mon.0) 1335 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:32.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.570133+0000 mon.vm01 (mon.0) 1335 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:32.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.779126+0000 mgr.vm01.nwhpas (mgr.14227) 1037 : audit [DBG] from='client.16858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.462 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:32 vm04 bash[34817]: audit 2026-04-16T19:39:31.779126+0000 mgr.vm01.nwhpas (mgr.14227) 1037 : audit [DBG] from='client.16858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.001414+0000 mon.vm01 (mon.0) 1327 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.001414+0000 mon.vm01 (mon.0) 1327 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.006955+0000 mon.vm01 (mon.0) 1328 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.006955+0000 mon.vm01 (mon.0) 1328 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.007828+0000 mon.vm01 (mon.0) 1329 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.007828+0000 mon.vm01 (mon.0) 1329 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 
vm01 bash[28222]: audit 2026-04-16T19:39:31.008273+0000 mon.vm01 (mon.0) 1330 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.008273+0000 mon.vm01 (mon.0) 1330 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: cluster 2026-04-16T19:39:31.009275+0000 mgr.vm01.nwhpas (mgr.14227) 1034 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 180 B/s rd, 361 B/s wr, 0 op/s 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: cluster 2026-04-16T19:39:31.009275+0000 mgr.vm01.nwhpas (mgr.14227) 1034 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 180 B/s rd, 361 B/s wr, 0 op/s 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: cluster 2026-04-16T19:39:31.009391+0000 mgr.vm01.nwhpas (mgr.14227) 1035 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 219 B/s rd, 438 B/s wr, 0 op/s 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: cluster 2026-04-16T19:39:31.009391+0000 mgr.vm01.nwhpas (mgr.14227) 1035 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 219 B/s rd, 438 B/s wr, 0 op/s 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.012242+0000 mon.vm01 (mon.0) 1331 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.012242+0000 mon.vm01 (mon.0) 1331 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.014008+0000 mon.vm01 (mon.0) 1332 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.014008+0000 mon.vm01 (mon.0) 1332 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.564429+0000 mon.vm01 (mon.0) 1333 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.564429+0000 mon.vm01 (mon.0) 1333 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.569352+0000 
mon.vm01 (mon.0) 1334 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.569352+0000 mon.vm01 (mon.0) 1334 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.570036+0000 mgr.vm01.nwhpas (mgr.14227) 1036 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.570036+0000 mgr.vm01.nwhpas (mgr.14227) 1036 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.570133+0000 mon.vm01 (mon.0) 1335 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.570133+0000 mon.vm01 (mon.0) 1335 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.779126+0000 mgr.vm01.nwhpas (mgr.14227) 1037 : audit [DBG] from='client.16858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:32.464 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:32 vm01 bash[28222]: audit 2026-04-16T19:39:31.779126+0000 mgr.vm01.nwhpas (mgr.14227) 1037 : audit [DBG] from='client.16858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:33.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:33 vm04 bash[34817]: audit 2026-04-16T19:39:32.086893+0000 mon.vm01 (mon.0) 1336 : audit [DBG] from='client.? 192.168.123.101:0/534552315' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:33.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:33 vm04 bash[34817]: audit 2026-04-16T19:39:32.086893+0000 mon.vm01 (mon.0) 1336 : audit [DBG] from='client.? 192.168.123.101:0/534552315' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:33.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:33 vm01 bash[28222]: audit 2026-04-16T19:39:32.086893+0000 mon.vm01 (mon.0) 1336 : audit [DBG] from='client.? 192.168.123.101:0/534552315' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:33.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:33 vm01 bash[28222]: audit 2026-04-16T19:39:32.086893+0000 mon.vm01 (mon.0) 1336 : audit [DBG] from='client.? 
192.168.123.101:0/534552315' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:34.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:34 vm04 bash[34817]: cluster 2026-04-16T19:39:33.009775+0000 mgr.vm01.nwhpas (mgr.14227) 1038 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 219 B/s wr, 31 op/s 2026-04-16T19:39:34.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:34 vm04 bash[34817]: cluster 2026-04-16T19:39:33.009775+0000 mgr.vm01.nwhpas (mgr.14227) 1038 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 219 B/s wr, 31 op/s 2026-04-16T19:39:34.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:34 vm01 bash[28222]: cluster 2026-04-16T19:39:33.009775+0000 mgr.vm01.nwhpas (mgr.14227) 1038 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 219 B/s wr, 31 op/s 2026-04-16T19:39:34.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:34 vm01 bash[28222]: cluster 2026-04-16T19:39:33.009775+0000 mgr.vm01.nwhpas (mgr.14227) 1038 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 219 B/s wr, 31 op/s 2026-04-16T19:39:36.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:36 vm01 bash[28222]: cluster 2026-04-16T19:39:35.010184+0000 mgr.vm01.nwhpas (mgr.14227) 1039 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 219 B/s wr, 90 op/s 2026-04-16T19:39:36.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:36 vm01 bash[28222]: cluster 2026-04-16T19:39:35.010184+0000 mgr.vm01.nwhpas (mgr.14227) 1039 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 219 B/s wr, 90 op/s 2026-04-16T19:39:36.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:36 vm04 bash[34817]: cluster 2026-04-16T19:39:35.010184+0000 mgr.vm01.nwhpas (mgr.14227) 1039 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 219 B/s wr, 90 op/s 2026-04-16T19:39:36.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:36 vm04 bash[34817]: cluster 2026-04-16T19:39:35.010184+0000 mgr.vm01.nwhpas (mgr.14227) 1039 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 219 B/s wr, 90 op/s 2026-04-16T19:39:37.292 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (5s) 0s ago 16m 92.6M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 3c3b97b27d67 2026-04-16T19:39:37.293 INFO:teuthology.orchestra.run.vm01.stdout:Check with each haproxy down in turn... 
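The orchestra output that follows is the ingress failover check: each haproxy daemon is stopped in turn while the ingress virtual IP is polled over HTTP until it answers again, and the daemon is then restarted. A minimal sketch of that kind of poll loop, using the daemon name and VIP visible in this log (the 300s timeout is illustrative, not taken from the job):

    # stop one ingress daemon; the VIP should fail over to the remaining haproxy
    ceph orch daemon stop haproxy.rgw.foo.vm01.fvwjhu
    # curl exits 7 ("Failed to connect") until something accepts connections on the VIP again
    timeout 300 bash -c 'while ! curl http://12.12.1.101:9000/ ; do echo "Waiting for http://12.12.1.101:9000/ to be available"; sleep 1; done'
    ceph orch daemon start haproxy.rgw.foo.vm01.fvwjhu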
2026-04-16T19:39:37.676 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop haproxy.rgw.foo.vm01.fvwjhu on host 'vm01'
2026-04-16T19:39:37.884 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for haproxy.rgw.foo.vm01.fvwjhu to stop
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: audit 2026-04-16T19:39:36.546745+0000 mon.vm01 (mon.0) 1337 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: audit 2026-04-16T19:39:36.551660+0000 mon.vm01 (mon.0) 1338 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: audit 2026-04-16T19:39:36.552377+0000 mon.vm01 (mon.0) 1339 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: audit 2026-04-16T19:39:36.552811+0000 mon.vm01 (mon.0) 1340 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: cluster 2026-04-16T19:39:36.553855+0000 mgr.vm01.nwhpas (mgr.14227) 1040 : cluster [DBG] pgmap v583: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 230 B/s wr, 147 op/s
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: audit 2026-04-16T19:39:36.556881+0000 mon.vm01 (mon.0) 1341 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:37.954 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:37 vm01 bash[28222]: audit 2026-04-16T19:39:36.558274+0000 mon.vm01 (mon.0) 1342 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: audit 2026-04-16T19:39:36.546745+0000 mon.vm01 (mon.0) 1337 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: audit 2026-04-16T19:39:36.551660+0000 mon.vm01 (mon.0) 1338 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: audit 2026-04-16T19:39:36.552377+0000 mon.vm01 (mon.0) 1339 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: audit 2026-04-16T19:39:36.552811+0000 mon.vm01 (mon.0) 1340 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: cluster 2026-04-16T19:39:36.553855+0000 mgr.vm01.nwhpas (mgr.14227) 1040 : cluster [DBG] pgmap v583: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 230 B/s wr, 147 op/s
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: audit 2026-04-16T19:39:36.556881+0000 mon.vm01 (mon.0) 1341 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:37.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:37 vm04 bash[34817]: audit 2026-04-16T19:39:36.558274+0000 mon.vm01 (mon.0) 1342 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:38.074 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:38.074 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 running (16m) 7s ago 16m 4167k - 2.3.17-d1c9119 5479ac79e01f 21f358ccf387
2026-04-16T19:39:38.074 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 running (16m) 1s ago 16m 4219k - 2.3.17-d1c9119 5479ac79e01f c6f49f693226
2026-04-16T19:39:38.306 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:39:38.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.271913+0000 mgr.vm01.nwhpas (mgr.14227) 1041 : audit [DBG] from='client.16874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.460808+0000 mgr.vm01.nwhpas (mgr.14227) 1042 : audit [DBG] from='client.16878 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: cluster 2026-04-16T19:39:37.551722+0000 mon.vm01 (mon.0) 1343 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: cluster 2026-04-16T19:39:37.551750+0000 mon.vm01 (mon.0) 1344 : cluster [INF] Cluster is now healthy
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.591680+0000 mon.vm01 (mon.0) 1345 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.592315+0000 mon.vm01 (mon.0) 1346 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.659251+0000 mgr.vm01.nwhpas (mgr.14227) 1043 : audit [DBG] from='client.16882 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm01.fvwjhu", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: cephadm 2026-04-16T19:39:37.659649+0000 mgr.vm01.nwhpas (mgr.14227) 1044 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm01.fvwjhu
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.665308+0000 mon.vm01 (mon.0) 1347 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.670497+0000 mon.vm01 (mon.0) 1348 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.671221+0000 mon.vm01 (mon.0) 1349 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.672309+0000 mon.vm01 (mon.0) 1350 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.672730+0000 mon.vm01 (mon.0) 1351 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.676901+0000 mon.vm01 (mon.0) 1352 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.678253+0000 mon.vm01 (mon.0) 1353 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:37.859072+0000 mgr.vm01.nwhpas (mgr.14227) 1045 : audit [DBG] from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:38.152555+0000 mon.vm01 (mon.0) 1354 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:38.157991+0000 mon.vm01 (mon.0) 1355 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:38.159253+0000 mon.vm01 (mon.0) 1356 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:38.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:38 vm04 bash[34817]: audit 2026-04-16T19:39:38.300965+0000 mon.vm01 (mon.0) 1357 : audit [DBG] from='client.? 192.168.123.101:0/1086407623' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.271913+0000 mgr.vm01.nwhpas (mgr.14227) 1041 : audit [DBG] from='client.16874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.460808+0000 mgr.vm01.nwhpas (mgr.14227) 1042 : audit [DBG] from='client.16878 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: cluster 2026-04-16T19:39:37.551722+0000 mon.vm01 (mon.0) 1343 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: cluster 2026-04-16T19:39:37.551750+0000 mon.vm01 (mon.0) 1344 : cluster [INF] Cluster is now healthy
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.591680+0000 mon.vm01 (mon.0) 1345 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.592315+0000 mon.vm01 (mon.0) 1346 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.659251+0000 mgr.vm01.nwhpas (mgr.14227) 1043 : audit [DBG] from='client.16882 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm01.fvwjhu", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: cephadm 2026-04-16T19:39:37.659649+0000 mgr.vm01.nwhpas (mgr.14227) 1044 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm01.fvwjhu
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.665308+0000 mon.vm01 (mon.0) 1347 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.670497+0000 mon.vm01 (mon.0) 1348 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.671221+0000 mon.vm01 (mon.0) 1349 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.672309+0000 mon.vm01 (mon.0) 1350 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.672730+0000 mon.vm01 (mon.0) 1351 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.676901+0000 mon.vm01 (mon.0) 1352 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.678253+0000 mon.vm01 (mon.0) 1353 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:37.859072+0000 mgr.vm01.nwhpas (mgr.14227) 1045 : audit [DBG] from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:38.152555+0000 mon.vm01 (mon.0) 1354 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:38.157991+0000 mon.vm01 (mon.0) 1355 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:38.965 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:38.159253+0000 mon.vm01 (mon.0) 1356 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:38.965 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:38 vm01 bash[28222]: audit 2026-04-16T19:39:38.300965+0000 mon.vm01 (mon.0) 1357 : audit [DBG] from='client.? 192.168.123.101:0/1086407623' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:39:39.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:39 vm01 bash[28222]: audit 2026-04-16T19:39:38.065806+0000 mgr.vm01.nwhpas (mgr.14227) 1046 : audit [DBG] from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:39.714 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:39 vm01 bash[28222]: cluster 2026-04-16T19:39:38.554166+0000 mgr.vm01.nwhpas (mgr.14227) 1047 : cluster [DBG] pgmap v584: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 0 B/s wr, 147 op/s
2026-04-16T19:39:39.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:39 vm04 bash[34817]: audit 2026-04-16T19:39:38.065806+0000 mgr.vm01.nwhpas (mgr.14227) 1046 : audit [DBG] from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:39.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:39 vm04 bash[34817]: cluster 2026-04-16T19:39:38.554166+0000 mgr.vm01.nwhpas (mgr.14227) 1047 : cluster [DBG] pgmap v584: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 90 KiB/s rd, 0 B/s wr, 147 op/s
2026-04-16T19:39:41.713 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:41 vm01 bash[28222]: cluster 2026-04-16T19:39:40.554696+0000 mgr.vm01.nwhpas (mgr.14227) 1048 : cluster [DBG] pgmap v585: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 84 KiB/s rd, 0 B/s wr, 136 op/s
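The bursts of "orch ps" audit entries above come from the check polling daemon state until haproxy.rgw.foo.vm01.fvwjhu reports stopped; the plain-text status line it greps appears below. Where grepping fixed table columns is too fragile, the same check can be made against JSON output, roughly as follows (field names follow recent cephadm's DaemonDescription serialization and should be verified against the deployed version):

    # print a single daemon's state ("running", "stopped", ...) instead of grepping table columns
    ceph orch ps --daemon-type haproxy --format json \
        | jq -r '.[] | select(.daemon_id == "rgw.foo.vm01.fvwjhu") | .status_desc'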
2026-04-16T19:39:41.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:41 vm04 bash[34817]: cluster 2026-04-16T19:39:40.554696+0000 mgr.vm01.nwhpas (mgr.14227) 1048 : cluster [DBG] pgmap v585: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 84 KiB/s rd, 0 B/s wr, 136 op/s
2026-04-16T19:39:43.504 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 stopped 0s ago 16m - -
2026-04-16T19:39:43.508 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:39:43.508 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:39:43.508 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-04-16T19:39:43.508 INFO:teuthology.orchestra.run.vm01.stderr:curl: (7) Failed to connect to 12.12.1.101 port 9000: Connection refused
2026-04-16T19:39:43.509 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for http://12.12.1.101:9000/ to be available
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: cluster 2026-04-16T19:39:42.555053+0000 mgr.vm01.nwhpas (mgr.14227) 1049 : cluster [DBG] pgmap v586: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 0 B/s wr, 113 op/s
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: audit 2026-04-16T19:39:43.329717+0000 mon.vm01 (mon.0) 1358 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: audit 2026-04-16T19:39:43.335522+0000 mon.vm01 (mon.0) 1359 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: audit 2026-04-16T19:39:43.336356+0000 mon.vm01 (mon.0) 1360 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: audit 2026-04-16T19:39:43.336837+0000 mon.vm01 (mon.0) 1361 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: audit 2026-04-16T19:39:43.340494+0000 mon.vm01 (mon.0) 1362 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:43.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:43 vm04 bash[34817]: audit 2026-04-16T19:39:43.341633+0000 mon.vm01 (mon.0) 1363 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: cluster 2026-04-16T19:39:42.555053+0000 mgr.vm01.nwhpas (mgr.14227) 1049 : cluster [DBG] pgmap v586: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 0 B/s wr, 113 op/s
2026-04-16T19:39:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: audit 2026-04-16T19:39:43.329717+0000 mon.vm01 (mon.0) 1358 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:43.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: audit 2026-04-16T19:39:43.335522+0000 mon.vm01 (mon.0) 1359 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: audit 2026-04-16T19:39:43.336356+0000 mon.vm01 (mon.0) 1360 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:39:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: audit 2026-04-16T19:39:43.336837+0000 mon.vm01 (mon.0) 1361 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:39:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: audit 2026-04-16T19:39:43.340494+0000 mon.vm01 (mon.0) 1362 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:43.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:43 vm01 bash[28222]: audit 2026-04-16T19:39:43.341633+0000 mon.vm01 (mon.0) 1363 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:39:44.512 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:39:44.512 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:39:44.512 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-04-16T19:39:44.512 INFO:teuthology.orchestra.run.vm01.stderr:curl: (7) Failed to connect to 12.12.1.101 port 9000: Connection refused
2026-04-16T19:39:44.513 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for http://12.12.1.101:9000/ to be available
2026-04-16T19:39:44.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:44 vm04 bash[34817]: audit 2026-04-16T19:39:43.482495+0000 mgr.vm01.nwhpas (mgr.14227) 1050 : audit [DBG] from='client.16898 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:44.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:44 vm01 bash[28222]: audit 2026-04-16T19:39:43.482495+0000 mgr.vm01.nwhpas (mgr.14227) 1050 : audit [DBG] from='client.16898 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:45.516 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:39:45.516 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:39:45.517 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-16T19:39:45.705 INFO:teuthology.orchestra.run.vm01.stdout:anonymousScheduled to start haproxy.rgw.foo.vm01.fvwjhu on host 'vm01'
2026-04-16T19:39:45.919 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for haproxy.rgw.foo.vm01.fvwjhu to start
2026-04-16T19:39:45.965 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:45 vm01 bash[28222]: cluster 2026-04-16T19:39:44.555487+0000 mgr.vm01.nwhpas (mgr.14227) 1051 : cluster [DBG] pgmap v587: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 54 KiB/s rd, 0 B/s wr, 88 op/s
2026-04-16T19:39:45.965 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:45 vm01 bash[28222]: audit 2026-04-16T19:39:45.693943+0000 mon.vm01 (mon.0) 1364 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:45.965 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:45 vm01 bash[28222]: audit 2026-04-16T19:39:45.698661+0000 mon.vm01 (mon.0) 1365 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:45.965 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:45 vm01 bash[28222]: audit 2026-04-16T19:39:45.699435+0000 mon.vm01 (mon.0) 1366 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:46.097 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:39:46.097 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 stopped 2s ago 16m - -
2026-04-16T19:39:46.097 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 running (16m) 9s ago 16m 4219k - 2.3.17-d1c9119 5479ac79e01f c6f49f693226
2026-04-16T19:39:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:45 vm04 bash[34817]: cluster 2026-04-16T19:39:44.555487+0000 mgr.vm01.nwhpas (mgr.14227) 1051 : cluster [DBG] pgmap v587: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 54 KiB/s rd, 0 B/s wr, 88 op/s
2026-04-16T19:39:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:45 vm04 bash[34817]: audit 2026-04-16T19:39:45.693943+0000 mon.vm01 (mon.0) 1364 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:45 vm04 bash[34817]: audit 2026-04-16T19:39:45.698661+0000 mon.vm01 (mon.0) 1365 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:39:46.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:45 vm04 bash[34817]: audit 2026-04-16T19:39:45.699435+0000 mon.vm01 (mon.0) 1366 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:39:46.317 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:45.687710+0000 mgr.vm01.nwhpas (mgr.14227) 1052 : audit [DBG] from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm01.fvwjhu", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:39:46.725
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: cephadm 2026-04-16T19:39:45.688069+0000 mgr.vm01.nwhpas (mgr.14227) 1053 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm01.fvwjhu 2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: cephadm 2026-04-16T19:39:45.688069+0000 mgr.vm01.nwhpas (mgr.14227) 1053 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm01.fvwjhu 2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:45.897379+0000 mgr.vm01.nwhpas (mgr.14227) 1054 : audit [DBG] from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:45.897379+0000 mgr.vm01.nwhpas (mgr.14227) 1054 : audit [DBG] from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.039904+0000 mon.vm01 (mon.0) 1367 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.039904+0000 mon.vm01 (mon.0) 1367 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:46.725 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.040392+0000 mon.vm01 (mon.0) 1368 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.040392+0000 mon.vm01 (mon.0) 1368 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.044630+0000 mon.vm01 (mon.0) 1369 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.044630+0000 mon.vm01 (mon.0) 1369 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.045829+0000 mon.vm01 (mon.0) 1370 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.045829+0000 mon.vm01 (mon.0) 1370 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.311534+0000 mon.vm01 
(mon.0) 1371 : audit [DBG] from='client.? 192.168.123.101:0/1545763229' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:46.726 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.311534+0000 mon.vm01 (mon.0) 1371 : audit [DBG] from='client.? 192.168.123.101:0/1545763229' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:46.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:45.687710+0000 mgr.vm01.nwhpas (mgr.14227) 1052 : audit [DBG] from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm01.fvwjhu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:45.687710+0000 mgr.vm01.nwhpas (mgr.14227) 1052 : audit [DBG] from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm01.fvwjhu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: cephadm 2026-04-16T19:39:45.688069+0000 mgr.vm01.nwhpas (mgr.14227) 1053 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm01.fvwjhu 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: cephadm 2026-04-16T19:39:45.688069+0000 mgr.vm01.nwhpas (mgr.14227) 1053 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm01.fvwjhu 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:45.897379+0000 mgr.vm01.nwhpas (mgr.14227) 1054 : audit [DBG] from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:45.897379+0000 mgr.vm01.nwhpas (mgr.14227) 1054 : audit [DBG] from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.039904+0000 mon.vm01 (mon.0) 1367 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.039904+0000 mon.vm01 (mon.0) 1367 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.040392+0000 mon.vm01 (mon.0) 1368 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.040392+0000 mon.vm01 (mon.0) 1368 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 
2026-04-16T19:39:46.044630+0000 mon.vm01 (mon.0) 1369 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.044630+0000 mon.vm01 (mon.0) 1369 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.045829+0000 mon.vm01 (mon.0) 1370 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.045829+0000 mon.vm01 (mon.0) 1370 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.311534+0000 mon.vm01 (mon.0) 1371 : audit [DBG] from='client.? 192.168.123.101:0/1545763229' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.311534+0000 mon.vm01 (mon.0) 1371 : audit [DBG] from='client.? 192.168.123.101:0/1545763229' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.597374+0000 mon.vm01 (mon.0) 1372 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.597374+0000 mon.vm01 (mon.0) 1372 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.602756+0000 mon.vm01 (mon.0) 1373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.602756+0000 mon.vm01 (mon.0) 1373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.604564+0000 mon.vm01 (mon.0) 1374 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:46.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:46 vm01 bash[28222]: audit 2026-04-16T19:39:46.604564+0000 mon.vm01 (mon.0) 1374 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.597374+0000 mon.vm01 (mon.0) 1372 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 
bash[34817]: audit 2026-04-16T19:39:46.597374+0000 mon.vm01 (mon.0) 1372 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.602756+0000 mon.vm01 (mon.0) 1373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.602756+0000 mon.vm01 (mon.0) 1373 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.604564+0000 mon.vm01 (mon.0) 1374 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:47.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:46 vm04 bash[34817]: audit 2026-04-16T19:39:46.604564+0000 mon.vm01 (mon.0) 1374 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:47 vm04 bash[34817]: audit 2026-04-16T19:39:46.089167+0000 mgr.vm01.nwhpas (mgr.14227) 1055 : audit [DBG] from='client.16910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:47 vm04 bash[34817]: audit 2026-04-16T19:39:46.089167+0000 mgr.vm01.nwhpas (mgr.14227) 1055 : audit [DBG] from='client.16910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:47 vm04 bash[34817]: cluster 2026-04-16T19:39:46.555972+0000 mgr.vm01.nwhpas (mgr.14227) 1056 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 40 op/s 2026-04-16T19:39:48.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:47 vm04 bash[34817]: cluster 2026-04-16T19:39:46.555972+0000 mgr.vm01.nwhpas (mgr.14227) 1056 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 40 op/s 2026-04-16T19:39:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:47 vm01 bash[28222]: audit 2026-04-16T19:39:46.089167+0000 mgr.vm01.nwhpas (mgr.14227) 1055 : audit [DBG] from='client.16910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:47 vm01 bash[28222]: audit 2026-04-16T19:39:46.089167+0000 mgr.vm01.nwhpas (mgr.14227) 1055 : audit [DBG] from='client.16910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:47 vm01 bash[28222]: cluster 2026-04-16T19:39:46.555972+0000 mgr.vm01.nwhpas (mgr.14227) 1056 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 40 op/s 2026-04-16T19:39:48.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:47 
vm01 bash[28222]: cluster 2026-04-16T19:39:46.555972+0000 mgr.vm01.nwhpas (mgr.14227) 1056 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 0 B/s wr, 40 op/s 2026-04-16T19:39:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:49 vm04 bash[34817]: cluster 2026-04-16T19:39:48.556369+0000 mgr.vm01.nwhpas (mgr.14227) 1057 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:50.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:49 vm04 bash[34817]: cluster 2026-04-16T19:39:48.556369+0000 mgr.vm01.nwhpas (mgr.14227) 1057 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:49 vm01 bash[28222]: cluster 2026-04-16T19:39:48.556369+0000 mgr.vm01.nwhpas (mgr.14227) 1057 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:50.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:49 vm01 bash[28222]: cluster 2026-04-16T19:39:48.556369+0000 mgr.vm01.nwhpas (mgr.14227) 1057 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:51.522 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for haproxy.rgw.foo.vm01.fvwjhu to start 2026-04-16T19:39:51.726 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:39:51.727 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 stopped 8s ago 16m - - 2026-04-16T19:39:51.727 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 running (16m) 15s ago 16m 4219k - 2.3.17-d1c9119 5479ac79e01f c6f49f693226 2026-04-16T19:39:51.965 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:39:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:51 vm04 bash[34817]: cluster 2026-04-16T19:39:50.556772+0000 mgr.vm01.nwhpas (mgr.14227) 1058 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:52.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:51 vm04 bash[34817]: cluster 2026-04-16T19:39:50.556772+0000 mgr.vm01.nwhpas (mgr.14227) 1058 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:52.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:51 vm01 bash[28222]: cluster 2026-04-16T19:39:50.556772+0000 mgr.vm01.nwhpas (mgr.14227) 1058 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:52.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:51 vm01 bash[28222]: cluster 2026-04-16T19:39:50.556772+0000 mgr.vm01.nwhpas (mgr.14227) 1058 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.499513+0000 mgr.vm01.nwhpas (mgr.14227) 1059 : 
audit [DBG] from='client.16918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.499513+0000 mgr.vm01.nwhpas (mgr.14227) 1059 : audit [DBG] from='client.16918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.715927+0000 mgr.vm01.nwhpas (mgr.14227) 1060 : audit [DBG] from='client.16922 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.715927+0000 mgr.vm01.nwhpas (mgr.14227) 1060 : audit [DBG] from='client.16922 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.863153+0000 mon.vm01 (mon.0) 1375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.863153+0000 mon.vm01 (mon.0) 1375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.871034+0000 mon.vm01 (mon.0) 1376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.871034+0000 mon.vm01 (mon.0) 1376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.872143+0000 mon.vm01 (mon.0) 1377 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.872143+0000 mon.vm01 (mon.0) 1377 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.872665+0000 mon.vm01 (mon.0) 1378 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.872665+0000 mon.vm01 (mon.0) 1378 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.875757+0000 mon.vm01 (mon.0) 1379 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.211 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.875757+0000 mon.vm01 (mon.0) 1379 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.876950+0000 mon.vm01 (mon.0) 1380 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.876950+0000 mon.vm01 (mon.0) 1380 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.959506+0000 mon.vm04 (mon.1) 42 : audit [DBG] from='client.? 192.168.123.101:0/3198275660' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:51.959506+0000 mon.vm04 (mon.1) 42 : audit [DBG] from='client.? 192.168.123.101:0/3198275660' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:52.588082+0000 mon.vm01 (mon.0) 1381 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:39:53.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:52 vm04 bash[34817]: audit 2026-04-16T19:39:52.588082+0000 mon.vm01 (mon.0) 1381 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.499513+0000 mgr.vm01.nwhpas (mgr.14227) 1059 : audit [DBG] from='client.16918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.499513+0000 mgr.vm01.nwhpas (mgr.14227) 1059 : audit [DBG] from='client.16918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.715927+0000 mgr.vm01.nwhpas (mgr.14227) 1060 : audit [DBG] from='client.16922 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.715927+0000 mgr.vm01.nwhpas (mgr.14227) 1060 : audit [DBG] from='client.16922 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.863153+0000 mon.vm01 (mon.0) 1375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 
2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.863153+0000 mon.vm01 (mon.0) 1375 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.871034+0000 mon.vm01 (mon.0) 1376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.871034+0000 mon.vm01 (mon.0) 1376 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.872143+0000 mon.vm01 (mon.0) 1377 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.872143+0000 mon.vm01 (mon.0) 1377 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.872665+0000 mon.vm01 (mon.0) 1378 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.872665+0000 mon.vm01 (mon.0) 1378 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.875757+0000 mon.vm01 (mon.0) 1379 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.875757+0000 mon.vm01 (mon.0) 1379 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.876950+0000 mon.vm01 (mon.0) 1380 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.876950+0000 mon.vm01 (mon.0) 1380 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:53.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.959506+0000 mon.vm04 (mon.1) 42 : audit [DBG] from='client.? 
192.168.123.101:0/3198275660' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:51.959506+0000 mon.vm04 (mon.1) 42 : audit [DBG] from='client.? 192.168.123.101:0/3198275660' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:52.588082+0000 mon.vm01 (mon.0) 1381 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:39:53.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:52 vm01 bash[28222]: audit 2026-04-16T19:39:52.588082+0000 mon.vm01 (mon.0) 1381 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-16T19:39:54.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:53 vm04 bash[34817]: cluster 2026-04-16T19:39:52.557128+0000 mgr.vm01.nwhpas (mgr.14227) 1061 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:54.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:53 vm04 bash[34817]: cluster 2026-04-16T19:39:52.557128+0000 mgr.vm01.nwhpas (mgr.14227) 1061 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:54.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:53 vm01 bash[28222]: cluster 2026-04-16T19:39:52.557128+0000 mgr.vm01.nwhpas (mgr.14227) 1061 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:54.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:53 vm01 bash[28222]: cluster 2026-04-16T19:39:52.557128+0000 mgr.vm01.nwhpas (mgr.14227) 1061 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-16T19:39:56.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:55 vm04 bash[34817]: cluster 2026-04-16T19:39:54.557596+0000 mgr.vm01.nwhpas (mgr.14227) 1062 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:56.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:55 vm04 bash[34817]: cluster 2026-04-16T19:39:54.557596+0000 mgr.vm01.nwhpas (mgr.14227) 1062 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:56.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:55 vm01 bash[28222]: cluster 2026-04-16T19:39:54.557596+0000 mgr.vm01.nwhpas (mgr.14227) 1062 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:56.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:55 vm01 bash[28222]: cluster 2026-04-16T19:39:54.557596+0000 mgr.vm01.nwhpas (mgr.14227) 1062 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:57.166 
INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 running (10s) 5s ago 16m 3995k - 2.3.17-d1c9119 5479ac79e01f a67408202994 2026-04-16T19:39:57.370 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to stop haproxy.rgw.foo.vm04.bfwsbq on host 'vm04' 2026-04-16T19:39:57.575 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for haproxy.rgw.foo.vm04.bfwsbq to stop 2026-04-16T19:39:57.751 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-16T19:39:57.751 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 running (11s) 5s ago 16m 3995k - 2.3.17-d1c9119 5479ac79e01f a67408202994 2026-04-16T19:39:57.751 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 running (16m) 21s ago 16m 4219k - 2.3.17-d1c9119 5479ac79e01f c6f49f693226 2026-04-16T19:39:57.990 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: cluster 2026-04-16T19:39:56.557944+0000 mgr.vm01.nwhpas (mgr.14227) 1063 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: cluster 2026-04-16T19:39:56.557944+0000 mgr.vm01.nwhpas (mgr.14227) 1063 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.359510+0000 mon.vm01 (mon.0) 1382 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.359510+0000 mon.vm01 (mon.0) 1382 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.364394+0000 mon.vm01 (mon.0) 1383 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.364394+0000 mon.vm01 (mon.0) 1383 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.365402+0000 mon.vm01 (mon.0) 1384 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.365402+0000 mon.vm01 (mon.0) 1384 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.366306+0000 mon.vm01 (mon.0) 1385 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:58.211 
INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.366306+0000 mon.vm01 (mon.0) 1385 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.366664+0000 mon.vm01 (mon.0) 1386 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.366664+0000 mon.vm01 (mon.0) 1386 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.369806+0000 mon.vm01 (mon.0) 1387 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.369806+0000 mon.vm01 (mon.0) 1387 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.371009+0000 mon.vm01 (mon.0) 1388 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.371009+0000 mon.vm01 (mon.0) 1388 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.805967+0000 mon.vm01 (mon.0) 1389 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.805967+0000 mon.vm01 (mon.0) 1389 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.812951+0000 mon.vm01 (mon.0) 1390 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.812951+0000 mon.vm01 (mon.0) 1390 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.813982+0000 mon.vm01 (mon.0) 1391 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:57 vm04 bash[34817]: audit 2026-04-16T19:39:57.813982+0000 mon.vm01 (mon.0) 1391 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' 
entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: cluster 2026-04-16T19:39:56.557944+0000 mgr.vm01.nwhpas (mgr.14227) 1063 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: cluster 2026-04-16T19:39:56.557944+0000 mgr.vm01.nwhpas (mgr.14227) 1063 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.359510+0000 mon.vm01 (mon.0) 1382 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.359510+0000 mon.vm01 (mon.0) 1382 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.364394+0000 mon.vm01 (mon.0) 1383 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.364394+0000 mon.vm01 (mon.0) 1383 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.365402+0000 mon.vm01 (mon.0) 1384 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.365402+0000 mon.vm01 (mon.0) 1384 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.366306+0000 mon.vm01 (mon.0) 1385 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.366306+0000 mon.vm01 (mon.0) 1385 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.366664+0000 mon.vm01 (mon.0) 1386 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.366664+0000 mon.vm01 (mon.0) 1386 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:39:58.213 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.369806+0000 mon.vm01 (mon.0) 1387 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.369806+0000 mon.vm01 (mon.0) 1387 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.371009+0000 mon.vm01 (mon.0) 1388 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:58.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.371009+0000 mon.vm01 (mon.0) 1388 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:39:58.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.805967+0000 mon.vm01 (mon.0) 1389 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.805967+0000 mon.vm01 (mon.0) 1389 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.812951+0000 mon.vm01 (mon.0) 1390 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.812951+0000 mon.vm01 (mon.0) 1390 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:39:58.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.813982+0000 mon.vm01 (mon.0) 1391 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:57 vm01 bash[28222]: audit 2026-04-16T19:39:57.813982+0000 mon.vm01 (mon.0) 1391 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.143722+0000 mgr.vm01.nwhpas (mgr.14227) 1064 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.143722+0000 mgr.vm01.nwhpas (mgr.14227) 1064 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.345384+0000 mgr.vm01.nwhpas (mgr.14227) 1065 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch daemon", 
"action": "stop", "name": "haproxy.rgw.foo.vm04.bfwsbq", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.345384+0000 mgr.vm01.nwhpas (mgr.14227) 1065 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm04.bfwsbq", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: cephadm 2026-04-16T19:39:57.345778+0000 mgr.vm01.nwhpas (mgr.14227) 1066 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm04.bfwsbq 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: cephadm 2026-04-16T19:39:57.345778+0000 mgr.vm01.nwhpas (mgr.14227) 1066 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm04.bfwsbq 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.554118+0000 mgr.vm01.nwhpas (mgr.14227) 1067 : audit [DBG] from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.554118+0000 mgr.vm01.nwhpas (mgr.14227) 1067 : audit [DBG] from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.742500+0000 mgr.vm01.nwhpas (mgr.14227) 1068 : audit [DBG] from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.742500+0000 mgr.vm01.nwhpas (mgr.14227) 1068 : audit [DBG] from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.984863+0000 mon.vm01 (mon.0) 1392 : audit [DBG] from='client.? 192.168.123.101:0/1729928260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:58.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:58 vm04 bash[34817]: audit 2026-04-16T19:39:57.984863+0000 mon.vm01 (mon.0) 1392 : audit [DBG] from='client.? 
192.168.123.101:0/1729928260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.143722+0000 mgr.vm01.nwhpas (mgr.14227) 1064 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.143722+0000 mgr.vm01.nwhpas (mgr.14227) 1064 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.345384+0000 mgr.vm01.nwhpas (mgr.14227) 1065 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm04.bfwsbq", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.345384+0000 mgr.vm01.nwhpas (mgr.14227) 1065 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm04.bfwsbq", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: cephadm 2026-04-16T19:39:57.345778+0000 mgr.vm01.nwhpas (mgr.14227) 1066 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm04.bfwsbq 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: cephadm 2026-04-16T19:39:57.345778+0000 mgr.vm01.nwhpas (mgr.14227) 1066 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm04.bfwsbq 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.554118+0000 mgr.vm01.nwhpas (mgr.14227) 1067 : audit [DBG] from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.554118+0000 mgr.vm01.nwhpas (mgr.14227) 1067 : audit [DBG] from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.742500+0000 mgr.vm01.nwhpas (mgr.14227) 1068 : audit [DBG] from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.742500+0000 mgr.vm01.nwhpas (mgr.14227) 1068 : audit [DBG] from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.984863+0000 mon.vm01 (mon.0) 1392 : audit [DBG] from='client.? 
192.168.123.101:0/1729928260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:39:59.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:58 vm01 bash[28222]: audit 2026-04-16T19:39:57.984863+0000 mon.vm01 (mon.0) 1392 : audit [DBG] from='client.? 192.168.123.101:0/1729928260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-16T19:40:00.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:59 vm04 bash[34817]: cluster 2026-04-16T19:39:58.558319+0000 mgr.vm01.nwhpas (mgr.14227) 1069 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:40:00.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:39:59 vm04 bash[34817]: cluster 2026-04-16T19:39:58.558319+0000 mgr.vm01.nwhpas (mgr.14227) 1069 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:40:00.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:59 vm01 bash[28222]: cluster 2026-04-16T19:39:58.558319+0000 mgr.vm01.nwhpas (mgr.14227) 1069 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:40:00.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:39:59 vm01 bash[28222]: cluster 2026-04-16T19:39:58.558319+0000 mgr.vm01.nwhpas (mgr.14227) 1069 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-16T19:40:01.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:00 vm04 bash[34817]: cluster 2026-04-16T19:40:00.000080+0000 mon.vm01 (mon.0) 1393 : cluster [INF] overall HEALTH_OK 2026-04-16T19:40:01.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:00 vm04 bash[34817]: cluster 2026-04-16T19:40:00.000080+0000 mon.vm01 (mon.0) 1393 : cluster [INF] overall HEALTH_OK 2026-04-16T19:40:01.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:00 vm01 bash[28222]: cluster 2026-04-16T19:40:00.000080+0000 mon.vm01 (mon.0) 1393 : cluster [INF] overall HEALTH_OK 2026-04-16T19:40:01.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:00 vm01 bash[28222]: cluster 2026-04-16T19:40:00.000080+0000 mon.vm01 (mon.0) 1393 : cluster [INF] overall HEALTH_OK 2026-04-16T19:40:02.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:01 vm04 bash[34817]: cluster 2026-04-16T19:40:00.558745+0000 mgr.vm01.nwhpas (mgr.14227) 1070 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:40:02.461 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:01 vm04 bash[34817]: cluster 2026-04-16T19:40:00.558745+0000 mgr.vm01.nwhpas (mgr.14227) 1070 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:40:02.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:01 vm01 bash[28222]: cluster 2026-04-16T19:40:00.558745+0000 mgr.vm01.nwhpas (mgr.14227) 1070 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:40:02.463 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:01 vm01 bash[28222]: cluster 2026-04-16T19:40:00.558745+0000 mgr.vm01.nwhpas (mgr.14227) 1070 : 
2026-04-16T19:40:03.187 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 stopped 0s ago 16m - -
2026-04-16T19:40:03.191 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:40:03.191 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:40:03.192 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-16T19:40:03.373 INFO:teuthology.orchestra.run.vm01.stdout:anonymous
Scheduled to start haproxy.rgw.foo.vm04.bfwsbq on host 'vm04'
2026-04-16T19:40:03.582 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for haproxy.rgw.foo.vm04.bfwsbq to start
2026-04-16T19:40:03.761 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:40:03.761 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 running (17s) 11s ago 16m 3995k - 2.3.17-d1c9119 5479ac79e01f a67408202994
2026-04-16T19:40:03.761 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 stopped 1s ago 16m - -
2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.542503+0000 mon.vm01 (mon.0) 1394 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.547149+0000 mon.vm01 (mon.0) 1395 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.547899+0000 mon.vm01 (mon.0) 1396 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.548388+0000 mon.vm01 (mon.0) 1397 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
"auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.551742+0000 mon.vm01 (mon.0) 1398 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.551742+0000 mon.vm01 (mon.0) 1398 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.553031+0000 mon.vm01 (mon.0) 1399 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:02.553031+0000 mon.vm01 (mon.0) 1399 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: cluster 2026-04-16T19:40:02.559580+0000 mgr.vm01.nwhpas (mgr.14227) 1071 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: cluster 2026-04-16T19:40:02.559580+0000 mgr.vm01.nwhpas (mgr.14227) 1071 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.362008+0000 mon.vm01 (mon.0) 1400 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.362008+0000 mon.vm01 (mon.0) 1400 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.912 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.367305+0000 mon.vm01 (mon.0) 1401 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.367305+0000 mon.vm01 (mon.0) 1401 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.368115+0000 mon.vm01 (mon.0) 1402 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.368115+0000 mon.vm01 (mon.0) 1402 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.369333+0000 mon.vm01 
(mon.0) 1403 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.369333+0000 mon.vm01 (mon.0) 1403 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.369719+0000 mon.vm01 (mon.0) 1404 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.369719+0000 mon.vm01 (mon.0) 1404 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.373637+0000 mon.vm01 (mon.0) 1405 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.373637+0000 mon.vm01 (mon.0) 1405 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.375051+0000 mon.vm01 (mon.0) 1406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:40:03.913 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:03 vm04 bash[34817]: audit 2026-04-16T19:40:03.375051+0000 mon.vm01 (mon.0) 1406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-16T19:40:03.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.542503+0000 mon.vm01 (mon.0) 1394 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.542503+0000 mon.vm01 (mon.0) 1394 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.547149+0000 mon.vm01 (mon.0) 1395 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.963 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.547149+0000 mon.vm01 (mon.0) 1395 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' 2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.547899+0000 mon.vm01 (mon.0) 1396 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-16T19:40:03.964 
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.548388+0000 mon.vm01 (mon.0) 1397 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.551742+0000 mon.vm01 (mon.0) 1398 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:02.553031+0000 mon.vm01 (mon.0) 1399 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: cluster 2026-04-16T19:40:02.559580+0000 mgr.vm01.nwhpas (mgr.14227) 1071 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.362008+0000 mon.vm01 (mon.0) 1400 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.367305+0000 mon.vm01 (mon.0) 1401 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.368115+0000 mon.vm01 (mon.0) 1402 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.369333+0000 mon.vm01 (mon.0) 1403 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.369719+0000 mon.vm01 (mon.0) 1404 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.373637+0000 mon.vm01 (mon.0) 1405 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:03.964 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:03 vm01 bash[28222]: audit 2026-04-16T19:40:03.375051+0000 mon.vm01 (mon.0) 1406 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:40:03.982 INFO:teuthology.orchestra.run.vm01.stdout:HEALTH_OK
2026-04-16T19:40:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.165066+0000 mgr.vm01.nwhpas (mgr.14227) 1072 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.355958+0000 mgr.vm01.nwhpas (mgr.14227) 1073 : audit [DBG] from='client.16952 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm04.bfwsbq", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: cephadm 2026-04-16T19:40:03.356162+0000 mgr.vm01.nwhpas (mgr.14227) 1074 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm04.bfwsbq
2026-04-16T19:40:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.560619+0000 mgr.vm01.nwhpas (mgr.14227) 1075 : audit [DBG] from='client.16956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.752922+0000 mgr.vm01.nwhpas (mgr.14227) 1076 : audit [DBG] from='client.16960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:04.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.928644+0000 mon.vm01 (mon.0) 1407 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:04.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.933473+0000 mon.vm01 (mon.0) 1408 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:04.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.934351+0000 mon.vm01 (mon.0) 1409 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:40:04.962 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:04 vm04 bash[34817]: audit 2026-04-16T19:40:03.976419+0000 mon.vm01 (mon.0) 1410 : audit [DBG] from='client.? 192.168.123.101:0/1814344593' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.165066+0000 mgr.vm01.nwhpas (mgr.14227) 1072 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.355958+0000 mgr.vm01.nwhpas (mgr.14227) 1073 : audit [DBG] from='client.16952 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm04.bfwsbq", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: cephadm 2026-04-16T19:40:03.356162+0000 mgr.vm01.nwhpas (mgr.14227) 1074 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm04.bfwsbq
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.560619+0000 mgr.vm01.nwhpas (mgr.14227) 1075 : audit [DBG] from='client.16956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.752922+0000 mgr.vm01.nwhpas (mgr.14227) 1076 : audit [DBG] from='client.16960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.928644+0000 mon.vm01 (mon.0) 1407 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.933473+0000 mon.vm01 (mon.0) 1408 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.934351+0000 mon.vm01 (mon.0) 1409 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-16T19:40:05.214 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:04 vm01 bash[28222]: audit 2026-04-16T19:40:03.976419+0000 mon.vm01 (mon.0) 1410 : audit [DBG] from='client.? 192.168.123.101:0/1814344593' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-16T19:40:06.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:05 vm04 bash[34817]: cluster 2026-04-16T19:40:04.560063+0000 mgr.vm01.nwhpas (mgr.14227) 1077 : cluster [DBG] pgmap v597: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:40:06.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:05 vm01 bash[28222]: cluster 2026-04-16T19:40:04.560063+0000 mgr.vm01.nwhpas (mgr.14227) 1077 : cluster [DBG] pgmap v597: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-04-16T19:40:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:07 vm04 bash[34817]: cluster 2026-04-16T19:40:06.560545+0000 mgr.vm01.nwhpas (mgr.14227) 1078 : cluster [DBG] pgmap v598: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:40:08.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:07 vm04 bash[34817]: audit 2026-04-16T19:40:07.588365+0000 mon.vm01 (mon.0) 1411 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:40:08.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:07 vm01 bash[28222]: cluster 2026-04-16T19:40:06.560545+0000 mgr.vm01.nwhpas (mgr.14227) 1078 : cluster [DBG] pgmap v598: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:40:08.213 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:07 vm01 bash[28222]: audit 2026-04-16T19:40:07.588365+0000 mon.vm01 (mon.0) 1411 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-16T19:40:09.178 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 running (5s) 0s ago 16m 3919k - 2.3.17-d1c9119 5479ac79e01f b278505a9ebd
2026-04-16T19:40:09.182 INFO:teuthology.orchestra.run.vm01.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-16T19:40:09.182 INFO:teuthology.orchestra.run.vm01.stderr: Dload Upload Total Spent Left Speed
2026-04-16T19:40:09.183 INFO:teuthology.orchestra.run.vm01.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-16T19:40:09.254 INFO:teuthology.orchestra.run.vm01.stdout:anonymous
2026-04-16T19:40:09.254 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-04-16T19:40:09.256 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local
2026-04-16T19:40:09.256 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"''
2026-04-16T19:40:09.495 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:09.576 INFO:teuthology.orchestra.run.vm01.stdout:167 167
2026-04-16T19:40:09.617 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch status'
2026-04-16T19:40:09.867 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: cluster 2026-04-16T19:40:08.560910+0000 mgr.vm01.nwhpas (mgr.14227) 1079 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: audit 2026-04-16T19:40:08.693059+0000 mon.vm01 (mon.0) 1412 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: audit 2026-04-16T19:40:08.697671+0000 mon.vm01 (mon.0) 1413 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: audit 2026-04-16T19:40:08.698451+0000 mon.vm01 (mon.0) 1414 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: audit 2026-04-16T19:40:08.698989+0000 mon.vm01 (mon.0) 1415 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: audit 2026-04-16T19:40:08.702729+0000 mon.vm01 (mon.0) 1416 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:09.899 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:09 vm01 bash[28222]: audit 2026-04-16T19:40:08.703973+0000 mon.vm01 (mon.0) 1417 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: cluster 2026-04-16T19:40:08.560910+0000 mgr.vm01.nwhpas (mgr.14227) 1079 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: audit 2026-04-16T19:40:08.693059+0000 mon.vm01 (mon.0) 1412 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: audit 2026-04-16T19:40:08.697671+0000 mon.vm01 (mon.0) 1413 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: audit 2026-04-16T19:40:08.698451+0000 mon.vm01 (mon.0) 1414 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: audit 2026-04-16T19:40:08.698989+0000 mon.vm01 (mon.0) 1415 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: audit 2026-04-16T19:40:08.702729+0000 mon.vm01 (mon.0) 1416 : audit [INF] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas'
2026-04-16T19:40:09.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:09 vm04 bash[34817]: audit 2026-04-16T19:40:08.703973+0000 mon.vm01 (mon.0) 1417 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-16T19:40:10.212 INFO:teuthology.orchestra.run.vm01.stdout:Backend: cephadm
2026-04-16T19:40:10.212 INFO:teuthology.orchestra.run.vm01.stdout:Available: Yes
2026-04-16T19:40:10.213 INFO:teuthology.orchestra.run.vm01.stdout:Paused: No
2026-04-16T19:40:10.287 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch ps'
2026-04-16T19:40:10.533 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager.vm01 vm01 *:9093,9094 running (18m) 19s ago 19m 14.7M - 0.28.1 27c475db5fb1 f60e3350c3f7
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm01 vm01 *:9926 running (19m) 19s ago 19m 10.2M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 540e4c218237
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter.vm04 vm04 *:9926 running (18m) 2s ago 18m 10.6M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 761f5560b6de
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm01 vm01 running (19m) 19s ago 19m 10.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 ce9564d36680
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:crash.vm04 vm04 running (18m) 2s ago 18m 10.7M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 7f027725d823
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:grafana.vm01 vm01 *:3000 running (18m) 19s ago 18m 117M - 12.2.0 74144189b384 6b9d88a1a1b5
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm01.fvwjhu vm01 *:9000,9001 running (24s) 19s ago 16m 3995k - 2.3.17-d1c9119 5479ac79e01f a67408202994
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:haproxy.rgw.foo.vm04.bfwsbq vm04 *:9000,9001 running (6s) 2s ago 17m 3919k - 2.3.17-d1c9119 5479ac79e01f b278505a9ebd
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:keepalived.rgw.foo.vm01.mbrpom vm01 running (16m) 19s ago 16m 2471k - 2.2.4 93f9db46da49 15f522b4a4a0
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:keepalived.rgw.foo.vm04.bxakqk vm04 running (16m) 2s ago 16m 2499k - 2.2.4 93f9db46da49 1620b6bda485
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm01.nwhpas vm01 *:9283,8765,8443 running (20m) 19s ago 20m 556M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 90f8f68ae65c
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:mgr.vm04.ztqrcx vm04 *:8443,9283,8765 running (18m) 2s ago 18m 476M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 ae4f1b15769f
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm01 vm01 running (20m) 19s ago 20m 67.9M 2048M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 7daade271a02
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:mon.vm04 vm04 running (18m) 2s ago 18m 47.7M 2048M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 42f903048de4
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm01 vm01 *:9100 running (19m) 19s ago 19m 8955k - 1.9.1 d00a542e409e 87b5ac70aa73
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter.vm04 vm04 *:9100 running (18m) 2s ago 18m 9056k - 1.9.1 d00a542e409e 1cb8d0b1bf17
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.0 vm04 running (17m) 2s ago 17m 64.6M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 73aed07d4ffd
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.1 vm01 running (17m) 19s ago 17m 68.6M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2b78a9d7ead0
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.2 vm04 running (17m) 2s ago 17m 62.1M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 d826511db51b
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.3 vm01 running (17m) 19s ago 17m 62.9M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 4638771ab976
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.4 vm04 running (17m) 2s ago 17m 65.7M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 159489c731a2
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.5 vm01 running (17m) 19s ago 17m 71.2M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 5d3c8841868a
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.6 vm04 running (17m) 2s ago 17m 64.9M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 cca037a32e13
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:osd.7 vm01 running (17m) 19s ago 17m 56.8M 4096M 20.2.0-21-gc03ba9ecf58 fc41d50a3963 9aae000c145b
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:prometheus.vm01 vm01 *:9095 running (16m) 19s ago 18m 51.0M - 3.6.0 76947e7ef22f 269d1218810c
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.pktgwy vm01 *:8000 running (16m) 19s ago 17m 135M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2cf161ff473a
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm01.qgurbb vm01 *:8001 running (10m) 19s ago 17m 128M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 eb0ffcec2d7b
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.rpimxa vm04 *:8001 running (5m) 2s ago 17m 118M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 2e1aad8bc122
2026-04-16T19:40:10.883 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo.vm04.uxumrv vm04 *:8000 running (39s) 2s ago 17m 96.0M - 20.2.0-21-gc03ba9ecf58 fc41d50a3963 3c3b97b27d67
2026-04-16T19:40:10.894 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:10 vm01 bash[28222]: audit 2026-04-16T19:40:09.156671+0000 mgr.vm01.nwhpas (mgr.14227) 1080 : audit [DBG] from='client.16968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:10.940 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch ls'
2026-04-16T19:40:10.961 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:10 vm04 bash[34817]: audit 2026-04-16T19:40:09.156671+0000 mgr.vm01.nwhpas (mgr.14227) 1080 : audit [DBG] from='client.16968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:11.183 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:11.532 INFO:teuthology.orchestra.run.vm01.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:alertmanager ?:9093,9094 1/1 19s ago 19m count:1
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:ceph-exporter ?:9926 2/2 19s ago 19m *
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:crash 2/2 19s ago 19m *
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:grafana ?:3000 1/1 19s ago 19m count:1
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:ingress.rgw.foo 12.12.1.101:9000,9001 4/4 19s ago 17m count:2
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:mgr 2/2 19s ago 19m count:2
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:mon 2/2 19s ago 19m vm01:192.168.123.101=vm01;vm04:192.168.123.104=vm04;count:2
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:node-exporter ?:9100 2/2 19s ago 19m *
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:osd.all-available-devices 8 19s ago 18m *
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:prometheus ?:9095 1/1 19s ago 19m count:1
2026-04-16T19:40:11.533 INFO:teuthology.orchestra.run.vm01.stdout:rgw.foo ?:8000,8001 4/4 19s ago 17m count:4;*
2026-04-16T19:40:11.614 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch host ls'
2026-04-16T19:40:11.858 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:10.206924+0000 mgr.vm01.nwhpas (mgr.14227) 1081 : audit [DBG] from='client.16972 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: cluster 2026-04-16T19:40:10.561344+0000 mgr.vm01.nwhpas (mgr.14227) 1082 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:10.871774+0000 mgr.vm01.nwhpas (mgr.14227) 1083 : audit [DBG] from='client.16976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:11.522946+0000 mon.vm01 (mon.0) 1418 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:11.522946+0000 mon.vm01 (mon.0) 1418 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:11.523786+0000 mon.vm01 (mon.0) 1419 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:11.524215+0000 mon.vm01 (mon.0) 1420 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:11.892 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:11.524622+0000 mon.vm01 (mon.0) 1421 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: audit 2026-04-16T19:40:10.206924+0000 mgr.vm01.nwhpas (mgr.14227) 1081 : audit [DBG] from='client.16972 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: cluster 2026-04-16T19:40:10.561344+0000 mgr.vm01.nwhpas (mgr.14227) 1082 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: audit 2026-04-16T19:40:10.871774+0000 mgr.vm01.nwhpas (mgr.14227) 1083 : audit [DBG] from='client.16976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: audit 2026-04-16T19:40:11.522946+0000 mon.vm01 (mon.0) 1418 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: audit 2026-04-16T19:40:11.523786+0000 mon.vm01 (mon.0) 1419 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: audit 2026-04-16T19:40:11.524215+0000 mon.vm01 (mon.0) 1420 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:12.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:11 vm04 bash[34817]: audit 2026-04-16T19:40:11.524622+0000 mon.vm01 (mon.0) 1421 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:12.218 INFO:teuthology.orchestra.run.vm01.stdout:HOST ADDR LABELS STATUS
2026-04-16T19:40:12.218 INFO:teuthology.orchestra.run.vm01.stdout:vm01 192.168.123.101
2026-04-16T19:40:12.218 INFO:teuthology.orchestra.run.vm01.stdout:vm04 192.168.123.104
2026-04-16T19:40:12.218 INFO:teuthology.orchestra.run.vm01.stdout:2 hosts in cluster
2026-04-16T19:40:12.281 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch device ls'
2026-04-16T19:40:12.525 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme0n1 ssd Linux_f0ab57676a82334da227 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme1n1 ssd Linux_498a3ba4a5f7d3afa78b 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme2n1 ssd Linux_018d58381178c5541924 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/nvme3n1 ssd Linux_cb1f69ea567d81122781 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17m ago Has a FileSystem, Insufficient space (<5GB)
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme0n1 ssd Linux_29b2bfa9f2fa89bb2491 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme1n1 ssd Linux_61c157a7b2d028903b27 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme2n1 ssd Linux_c8bfb12ab4da82fe6601 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/nvme3n1 ssd Linux_5c94f58d64ed1622992e 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17m ago Has a FileSystem, Insufficient space (<5GB)
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdb hdd DWNBRSTVMM04001 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdc hdd DWNBRSTVMM04002 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vdd hdd DWNBRSTVMM04003 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.887 INFO:teuthology.orchestra.run.vm01.stdout:vm04 /dev/vde hdd DWNBRSTVMM04004 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-16T19:40:12.898 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:11 vm01 bash[28222]: audit 2026-04-16T19:40:11.522266+0000 mgr.vm01.nwhpas (mgr.14227) 1084 : audit [DBG] from='client.16980 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:12.966 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"''
2026-04-16T19:40:13.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:12 vm04 bash[34817]: audit 2026-04-16T19:40:11.522266+0000 mgr.vm01.nwhpas (mgr.14227) 1084 : audit [DBG] from='client.16980 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:13.212 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:13.573 INFO:teuthology.orchestra.run.vm01.stdout:osd.all-available-devices 8 21s ago 18m *
2026-04-16T19:40:13.653 DEBUG:teuthology.run_tasks:Unwinding manager vip
2026-04-16T19:40:13.656 INFO:tasks.vip:Removing 12.12.0.101 (and any VIPs) on vm01.local iface ens3...
2026-04-16T19:40:13.656 DEBUG:teuthology.orchestra.run.vm01:> sudo ip addr del 12.12.0.101/22 dev ens3
2026-04-16T19:40:13.667 DEBUG:teuthology.orchestra.run.vm01:> sudo ip addr del 12.12.1.101/22 dev ens3
2026-04-16T19:40:13.720 INFO:tasks.vip:Removing 12.12.0.104 (and any VIPs) on vm04.local iface ens3...
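The vip manager unwinding here removes, per host, the static test address and then the shared ingress VIP. Since 12.12.1.101 was held by vm01 when the test finished, the matching delete on vm04 just below fails with an RTNETLINK error (exit status 2) that the run records but does not treat as fatal. An idempotent variant would swallow that expected failure; a sketch (an assumption, not what the task runs):

  # delete the VIP if present; ignore the error when the address is not on this host
  sudo ip addr del 12.12.1.101/22 dev ens3 2>/dev/null || true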
2026-04-16T19:40:13.721 DEBUG:teuthology.orchestra.run.vm04:> sudo ip addr del 12.12.0.104/22 dev ens3
2026-04-16T19:40:13.728 DEBUG:teuthology.orchestra.run.vm04:> sudo ip addr del 12.12.1.101/22 dev ens3
2026-04-16T19:40:13.776 INFO:teuthology.orchestra.run.vm04.stderr:RTNETLINK answers: Cannot assign requested address
2026-04-16T19:40:13.776 DEBUG:teuthology.orchestra.run:got remote process result: 2
2026-04-16T19:40:13.776 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-04-16T19:40:13.778 INFO:tasks.cephadm:Teardown begin
2026-04-16T19:40:13.778 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-16T19:40:13.786 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-16T19:40:13.824 INFO:tasks.cephadm:Disabling cephadm mgr module
2026-04-16T19:40:13.824 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-7 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 -- ceph mgr module disable cephadm
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: audit 2026-04-16T19:40:12.211897+0000 mgr.vm01.nwhpas (mgr.14227) 1085 : audit [DBG] from='client.16984 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: cluster 2026-04-16T19:40:12.561789+0000 mgr.vm01.nwhpas (mgr.14227) 1086 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: audit 2026-04-16T19:40:12.879584+0000 mgr.vm01.nwhpas (mgr.14227) 1087 : audit [DBG] from='client.16988 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: audit 2026-04-16T19:40:13.553463+0000 mon.vm01 (mon.0) 1422 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: audit 2026-04-16T19:40:13.554126+0000 mon.vm01 (mon.0) 1423 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: audit 2026-04-16T19:40:13.554549+0000 mon.vm01 (mon.0) 1424 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:13.837 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:13 vm01 bash[28222]: audit 2026-04-16T19:40:13.554926+0000 mon.vm01 (mon.0) 1425 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:14.085 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/mon.vm01/config
2026-04-16T19:40:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: audit 2026-04-16T19:40:12.211897+0000 mgr.vm01.nwhpas (mgr.14227) 1085 : audit [DBG] from='client.16984 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: cluster 2026-04-16T19:40:12.561789+0000 mgr.vm01.nwhpas (mgr.14227) 1086 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-16T19:40:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: audit 2026-04-16T19:40:12.879584+0000 mgr.vm01.nwhpas (mgr.14227) 1087 : audit [DBG] from='client.16988 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-16T19:40:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: audit 2026-04-16T19:40:13.553463+0000 mon.vm01 (mon.0) 1422 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.pktgwy", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: audit 2026-04-16T19:40:13.554126+0000 mon.vm01 (mon.0) 1423 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm01.qgurbb", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:14.211 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: audit 2026-04-16T19:40:13.554549+0000 mon.vm01 (mon.0) 1424 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.rpimxa", "name": "rgw_frontends"} : dispatch
2026-04-16T19:40:14.212 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:13 vm04 bash[34817]: audit 2026-04-16T19:40:13.554926+0000 mon.vm01 (mon.0) 1425 : audit [DBG] from='mgr.14227 192.168.123.101:0/3759710451' entity='mgr.vm01.nwhpas' cmd={"prefix": "config get", "who": "client.rgw.foo.vm04.uxumrv", "name": "rgw_frontends"} : dispatch
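The mgr-module-disable attempt just issued does not get far: as the stderr block below shows, authentication fails with errno 21 (EISDIR) because the keyring path inside the shell resolves to a directory rather than a file, and the same condition later defeats the plain `rm -f` of /etc/ceph/ceph.client.admin.keyring. A plausible reading is that the keyring file was already removed at "Teardown begin" above, and the container runtime then auto-created directories at the now-missing bind-mount paths. A hedged diagnostic sketch for this failure mode (assumed host state; not commands the job ran):

  # EISDIR from the keyring loader suggests a directory where a file belongs
  stat -c '%F' /etc/ceph/ceph.client.admin.keyring   # expect "regular file"; here it would report "directory"
  sudo rm -rf /etc/ceph/ceph.client.admin.keyring    # directory-safe cleanup; plain rm -f fails on a directory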
2026-04-16T19:40:14.317 INFO:teuthology.orchestra.run.vm01.stderr:2026-04-16T19:40:14.310+0000 7fe88c743640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory
2026-04-16T19:40:14.317 INFO:teuthology.orchestra.run.vm01.stderr:2026-04-16T19:40:14.310+0000 7fe88c743640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory
2026-04-16T19:40:14.317 INFO:teuthology.orchestra.run.vm01.stderr:2026-04-16T19:40:14.310+0000 7fe88c743640 -1 monclient: keyring not found
2026-04-16T19:40:14.320 INFO:teuthology.orchestra.run.vm01.stderr:[errno 21] error connecting to the cluster
2026-04-16T19:40:14.374 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-16T19:40:14.374 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-04-16T19:40:14.374 DEBUG:teuthology.orchestra.run.vm01:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-04-16T19:40:14.419 DEBUG:teuthology.orchestra.run.vm04:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-04-16T19:40:14.422 INFO:tasks.cephadm:Stopping all daemons...
2026-04-16T19:40:14.422 INFO:tasks.cephadm.mon.vm01:Stopping mon.vm01...
2026-04-16T19:40:14.422 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01
2026-04-16T19:40:14.636 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:14 vm01 systemd[1]: Stopping Ceph mon.vm01 for 3711bb6a-39c9-11f1-9688-8928648d55a6...
2026-04-16T19:40:14.636 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:14 vm01 bash[28222]: debug 2026-04-16T19:40:14.498+0000 7f7e9527f640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm01 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-04-16T19:40:14.636 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 16 19:40:14 vm01 bash[28222]: debug 2026-04-16T19:40:14.498+0000 7f7e9527f640 -1 mon.vm01@0(leader) e2 *** Got Signal Terminated ***
2026-04-16T19:40:14.693 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm01.service'
2026-04-16T19:40:14.721 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-04-16T19:40:14.721 INFO:tasks.cephadm.mon.vm01:Stopped mon.vm01
2026-04-16T19:40:14.721 INFO:tasks.cephadm.mon.vm04:Stopping mon.vm04...
2026-04-16T19:40:14.721 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm04
2026-04-16T19:40:14.984 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:14 vm04 systemd[1]: Stopping Ceph mon.vm04 for 3711bb6a-39c9-11f1-9688-8928648d55a6...
2026-04-16T19:40:14.984 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:14 vm04 bash[34817]: debug 2026-04-16T19:40:14.758+0000 7f4c8f91a640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm04 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-04-16T19:40:14.984 INFO:journalctl@ceph.mon.vm04.vm04.stdout:Apr 16 19:40:14 vm04 bash[34817]: debug 2026-04-16T19:40:14.758+0000 7f4c8f91a640 -1 mon.vm04@1(peon) e2 *** Got Signal Terminated ***
2026-04-16T19:40:15.049 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3711bb6a-39c9-11f1-9688-8928648d55a6@mon.vm04.service'
2026-04-16T19:40:15.075 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-04-16T19:40:15.075 INFO:tasks.cephadm.mon.vm04:Stopped mon.vm04
2026-04-16T19:40:15.075 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 --force --keep-logs
2026-04-16T19:40:15.314 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:41:04.680 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 --force --keep-logs
2026-04-16T19:41:04.917 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:41:53.223 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-16T19:41:53.230 INFO:teuthology.orchestra.run.vm01.stderr:rm: cannot remove '/etc/ceph/ceph.client.admin.keyring': Is a directory
2026-04-16T19:41:53.231 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-16T19:41:53.231 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-16T19:41:53.238 INFO:tasks.cephadm:Archiving crash dumps...
2026-04-16T19:41:53.239 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/crash to /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549/remote/vm01/crash
2026-04-16T19:41:53.239 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/crash -- .
2026-04-16T19:41:53.278 INFO:teuthology.orchestra.run.vm01.stderr:tar: /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/crash: Cannot open: No such file or directory
2026-04-16T19:41:53.278 INFO:teuthology.orchestra.run.vm01.stderr:tar: Error is not recoverable: exiting now
2026-04-16T19:41:53.279 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/crash to /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549/remote/vm04/crash
2026-04-16T19:41:53.279 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/crash -- .
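Cluster removal here is two-phase: `rm-cluster --force --keep-logs` runs first so /var/log/ceph survives for the compression and archiving steps that follow, and a second `rm-cluster --force` (further down) sweeps up what is left. In outline, with the path and fsid from this run:

  CEPHADM=/home/ubuntu/cephtest/cephadm
  FSID=3711bb6a-39c9-11f1-9688-8928648d55a6
  sudo "$CEPHADM" rm-cluster --fsid "$FSID" --force --keep-logs   # daemons and data go, logs stay
  # ... logs are compressed and archived ...
  sudo "$CEPHADM" rm-cluster --fsid "$FSID" --force               # final removal

The crash-dump tar failures around this point are benign: /var/lib/ceph/$FSID/crash is absent, consistent with the preceding rm-cluster having already removed the cluster's data directory and with no crash dumps having been recorded during the run.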
2026-04-16T19:41:53.290 INFO:teuthology.orchestra.run.vm04.stderr:tar: /var/lib/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/crash: Cannot open: No such file or directory
2026-04-16T19:41:53.290 INFO:teuthology.orchestra.run.vm04.stderr:tar: Error is not recoverable: exiting now
2026-04-16T19:41:53.290 INFO:tasks.cephadm:Checking cluster log for badness...
2026-04-16T19:41:53.290 DEBUG:teuthology.orchestra.run.vm01:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-04-16T19:41:53.328 INFO:tasks.cephadm:Compressing logs...
2026-04-16T19:41:53.328 DEBUG:teuthology.orchestra.run.vm01:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-04-16T19:41:53.373 DEBUG:teuthology.orchestra.run.vm04:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-04-16T19:41:53.379 INFO:teuthology.orchestra.run.vm01.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-04-16T19:41:53.379 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.audit.log
2026-04-16T19:41:53.380 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm01.qgurbb.log
2026-04-16T19:41:53.380 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.ceph-exporter.vm01.log
2026-04-16T19:41:53.381 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.7.log
2026-04-16T19:41:53.381 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm01.qgurbb.log: /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.audit.log: /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.ceph-exporter.vm01.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm01.pktgwy.log
2026-04-16T19:41:53.381 INFO:teuthology.orchestra.run.vm04.stderr:find: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.audit.log
2026-04-16T19:41:53.381 INFO:teuthology.orchestra.run.vm04.stderr:‘/var/log/rbd-target-api’: No such file or directory
2026-04-16T19:41:53.381 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mon.vm04.log
2026-04-16T19:41:53.381 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.3.log
2026-04-16T19:41:53.382 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.audit.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm04.uxumrv.log
2026-04-16T19:41:53.382 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mon.vm04.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log
2026-04-16T19:41:53.382 INFO:teuthology.orchestra.run.vm01.stderr: 94.8% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.ceph-exporter.vm01.log.gz
2026-04-16T19:41:53.382 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm04.uxumrv.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.0.log
2026-04-16T19:41:53.383 INFO:teuthology.orchestra.run.vm01.stderr: 93.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm01.qgurbb.log.gz
2026-04-16T19:41:53.383 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log
2026-04-16T19:41:53.383 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.cephadm.log
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log: 91.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.audit.log.gz
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm04.stderr: 90.3% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log.gz
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.cephadm.log
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm04.stderr: 93.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm04.uxumrv.log.gz/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.0.log:
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.4.log
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm01.pktgwy.log: /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log: 91.3% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.audit.log.gz
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-volume.log
2026-04-16T19:41:53.384 INFO:teuthology.orchestra.run.vm01.stderr: 90.2% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.log.gz
2026-04-16T19:41:53.385 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.cephadm.log: /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.4.log: 82.6% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.cephadm.log.gz
2026-04-16T19:41:53.385 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm04.rpimxa.log
2026-04-16T19:41:53.387 INFO:teuthology.orchestra.run.vm01.stderr: 92.3% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm01.pktgwy.log.gz
2026-04-16T19:41:53.395 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.ceph-exporter.vm04.log
2026-04-16T19:41:53.397 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-volume.log
2026-04-16T19:41:53.397 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.1.log
2026-04-16T19:41:53.397 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm04.rpimxa.log: 93.4% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm04.rpimxa.log.gz
2026-04-16T19:41:53.398 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.cephadm.log: /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-volume.log: 83.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph.cephadm.log.gz
2026-04-16T19:41:53.398 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mgr.vm01.nwhpas.log
2026-04-16T19:41:53.404 INFO:teuthology.orchestra.run.vm04.stderr: 96.2% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-volume.log.gz
2026-04-16T19:41:53.405 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.2.log
2026-04-16T19:41:53.406 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.ceph-exporter.vm04.log: 94.8% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.ceph-exporter.vm04.log.gz
2026-04-16T19:41:53.407 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.6.log
2026-04-16T19:41:53.408 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.5.log
2026-04-16T19:41:53.413 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mgr.vm01.nwhpas.log: 96.1% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-volume.log.gz
2026-04-16T19:41:53.416 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mon.vm01.log
2026-04-16T19:41:53.417 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm01.pktgwy.log
2026-04-16T19:41:53.419 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mgr.vm04.ztqrcx.log
2026-04-16T19:41:53.428 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mon.vm01.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm01.qgurbb.log
2026-04-16T19:41:53.431 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm01.pktgwy.log: 93.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ops-log-ceph-client.rgw.foo.vm01.pktgwy.log.gz
2026-04-16T19:41:53.435 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm04.uxumrv.log
2026-04-16T19:41:53.439 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mgr.vm04.ztqrcx.log: 92.7% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mgr.vm04.ztqrcx.log.gz
2026-04-16T19:41:53.442 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm04.rpimxa.log
2026-04-16T19:41:53.448 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-04-16T19:41:53.451 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm04.uxumrv.log: gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-04-16T19:41:53.451 INFO:teuthology.orchestra.run.vm04.stderr: 92.1% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm04.uxumrv.log.gz
2026-04-16T19:41:53.451 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm01.qgurbb.log: 92.2% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm01.qgurbb.log.gz
2026-04-16T19:41:53.460 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm04.rpimxa.log: /var/log/ceph/cephadm.log: 90.9% -- replaced with /var/log/ceph/cephadm.log.gz
2026-04-16T19:41:53.467 INFO:teuthology.orchestra.run.vm04.stderr: 92.0% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-client.rgw.foo.vm04.rpimxa.log.gz
2026-04-16T19:41:53.493 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/cephadm.log: 91.7% -- replaced with /var/log/ceph/cephadm.log.gz
2026-04-16T19:41:53.530 INFO:teuthology.orchestra.run.vm04.stderr: 92.8% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mon.vm04.log.gz
2026-04-16T19:41:53.773 INFO:teuthology.orchestra.run.vm01.stderr: 89.7% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mgr.vm01.nwhpas.log.gz
2026-04-16T19:41:53.860 INFO:teuthology.orchestra.run.vm01.stderr: 93.1% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.7.log.gz
2026-04-16T19:41:53.927 INFO:teuthology.orchestra.run.vm01.stderr: 90.8% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-mon.vm01.log.gz
2026-04-16T19:41:53.993 INFO:teuthology.orchestra.run.vm04.stderr: 93.4% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.2.log.gz
2026-04-16T19:41:54.025 INFO:teuthology.orchestra.run.vm04.stderr: 93.1% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.0.log.gz
2026-04-16T19:41:54.179 INFO:teuthology.orchestra.run.vm01.stderr: 93.1% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.5.log.gz
2026-04-16T19:41:54.204 INFO:teuthology.orchestra.run.vm04.stderr: 93.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.4.log.gz
2026-04-16T19:41:54.212 INFO:teuthology.orchestra.run.vm01.stderr: 93.5% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.3.log.gz
2026-04-16T19:41:54.234 INFO:teuthology.orchestra.run.vm04.stderr: 93.3% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.6.log.gz
2026-04-16T19:41:54.235 INFO:teuthology.orchestra.run.vm04.stderr:
2026-04-16T19:41:54.235 INFO:teuthology.orchestra.run.vm04.stderr:real 0m0.860s
2026-04-16T19:41:54.235 INFO:teuthology.orchestra.run.vm04.stderr:user 0m2.783s
2026-04-16T19:41:54.235 INFO:teuthology.orchestra.run.vm04.stderr:sys 0m0.153s
2026-04-16T19:41:54.414 INFO:teuthology.orchestra.run.vm01.stderr: 93.4% -- replaced with /var/log/ceph/3711bb6a-39c9-11f1-9688-8928648d55a6/ceph-osd.1.log.gz
2026-04-16T19:41:54.416 INFO:teuthology.orchestra.run.vm01.stderr:
2026-04-16T19:41:54.416 INFO:teuthology.orchestra.run.vm01.stderr:real 0m1.041s
2026-04-16T19:41:54.416 INFO:teuthology.orchestra.run.vm01.stderr:user 0m3.031s
2026-04-16T19:41:54.416 INFO:teuthology.orchestra.run.vm01.stderr:sys 0m0.157s
2026-04-16T19:41:54.416 INFO:tasks.cephadm:Archiving logs...
2026-04-16T19:41:54.416 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/log/ceph to /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549/remote/vm01/log
2026-04-16T19:41:54.416 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/log/ceph -- .
2026-04-16T19:41:54.622 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/log/ceph to /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549/remote/vm04/log
2026-04-16T19:41:54.622 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/log/ceph -- .
2026-04-16T19:41:54.771 INFO:tasks.cephadm:Removing cluster...
2026-04-16T19:41:54.771 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 --force
2026-04-16T19:41:55.017 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:41:55.064 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3711bb6a-39c9-11f1-9688-8928648d55a6 --force
2026-04-16T19:41:55.329 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: 3711bb6a-39c9-11f1-9688-8928648d55a6
2026-04-16T19:41:55.382 INFO:tasks.cephadm:Removing cephadm ...
2026-04-16T19:41:55.382 DEBUG:teuthology.orchestra.run.vm01:> rm -rf /home/ubuntu/cephtest/cephadm
2026-04-16T19:41:55.385 DEBUG:teuthology.orchestra.run.vm04:> rm -rf /home/ubuntu/cephtest/cephadm
2026-04-16T19:41:55.389 INFO:tasks.cephadm:Teardown complete
2026-04-16T19:41:55.389 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop
2026-04-16T19:41:55.440 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_1...
2026-04-16T19:41:55.441 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_1
2026-04-16T19:41:55.605 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_1 disconnected 1 controller(s)
2026-04-16T19:41:55.606 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_2...
2026-04-16T19:41:55.606 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_2
2026-04-16T19:41:55.768 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_2 disconnected 1 controller(s)
2026-04-16T19:41:55.797 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_3...
2026-04-16T19:41:55.797 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_3
2026-04-16T19:41:55.956 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_3 disconnected 1 controller(s)
2026-04-16T19:41:55.958 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_4...
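The nvme_loop unwind then detaches, host by host, the four NVMe loop namespaces that backed the test's scratch devices, one `nvme disconnect` per logical volume; the lv_4 command follows below, and the same sequence repeats on vm04. Reduced to its shape (a sketch; the task issues the commands individually):

  # one disconnect per exported LV; in this setup the NQN matches the LV name
  for lv in lv_1 lv_2 lv_3 lv_4; do
      sudo nvme disconnect -n "$lv"
  done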
2026-04-16T19:41:55.958 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_4
2026-04-16T19:41:56.128 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_4 disconnected 1 controller(s)
2026-04-16T19:41:56.130 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-16T19:41:56.130 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/scratch_devs
2026-04-16T19:41:56.137 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_1...
2026-04-16T19:41:56.137 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_1
2026-04-16T19:41:56.311 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_1 disconnected 1 controller(s)
2026-04-16T19:41:56.312 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_2...
2026-04-16T19:41:56.312 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_2
2026-04-16T19:41:56.475 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_2 disconnected 1 controller(s)
2026-04-16T19:41:56.476 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_3...
2026-04-16T19:41:56.476 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_3
2026-04-16T19:41:56.647 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_3 disconnected 1 controller(s)
2026-04-16T19:41:56.648 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm04:/dev/vg_nvme/lv_4...
2026-04-16T19:41:56.648 DEBUG:teuthology.orchestra.run.vm04:> sudo nvme disconnect -n lv_4
2026-04-16T19:41:56.807 INFO:teuthology.orchestra.run.vm04.stdout:NQN:lv_4 disconnected 1 controller(s)
2026-04-16T19:41:56.808 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-04-16T19:41:56.808 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/scratch_devs
2026-04-16T19:41:56.816 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-04-16T19:41:56.819 INFO:teuthology.task.clock:Checking final clock skew...
2026-04-16T19:41:56.819 DEBUG:teuthology.orchestra.run.vm01:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-16T19:41:56.820 DEBUG:teuthology.orchestra.run.vm04:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout: remote refid st t when poll reach delay offset jitter
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:==============================================================================
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:-ctb01.martinmoe 87.63.200.138 2 u 101 128 377 31.606 +4.314 0.913
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:#static.179.181. 178.179.247.128 3 u 97 128 377 23.570 +5.631 1.959
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:+81.3.27.46 (ntp 194.58.204.196 2 u 99 128 377 27.497 +4.741 0.812
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:*vps-nue1.orlean 195.145.119.188 2 u 101 128 377 28.305 +2.962 0.666
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:-sv1.ggsrv.de 192.53.103.103 2 u 109 128 377 24.984 +3.936 1.038
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:+netcup01.therav 189.97.54.122 2 u 93 128 377 28.276 +3.300 1.542
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:+track.infra.9rc 141.144.246.224 5 u 25 128 377 28.269 +2.973 1.534
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:-v22025082392863 129.69.253.1 2 u 22 128 377 28.282 +1.974 0.564
2026-04-16T19:41:57.099 INFO:teuthology.orchestra.run.vm01.stdout:-server1a.sim720 193.67.79.202 2 u 17 128 377 25.039 +5.712 0.849
2026-04-16T19:41:57.100 INFO:teuthology.orchestra.run.vm01.stdout:-tick.infra.9rc. 46.38.244.94 3 u 23 256 377 28.272 +1.188 1.028
2026-04-16T19:41:57.100 INFO:teuthology.orchestra.run.vm01.stdout:+185.125.190.56 146.131.121.246 2 u 128 128 377 34.400 +3.596 0.746
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout: remote refid st t when poll reach delay offset jitter
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:==============================================================================
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:-ctb01.martinmoe 87.63.200.138 2 u 22 64 377 34.573 +3.534 1.449
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:*185.232.69.65 ( .PHC0. 1 u 23 64 377 28.274 +3.021 1.256
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:+vps-nue1.orlean 195.145.119.188 2 u 33 64 377 28.258 +3.319 0.431
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:+81.3.27.46 (ntp 194.58.204.196 2 u 21 64 377 27.343 +4.794 1.179
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:-server1a.sim720 193.67.79.202 2 u 23 128 377 25.049 +5.396 0.451
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:#s7.vonderste.in 137.226.119.25 2 u 24 64 377 28.285 +3.012 2.650
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:-v22025082392863 129.69.253.1 2 u 26 64 377 28.686 +1.972 1.717
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:#tick.infra.9rc. 46.38.244.94 3 u 22 64 377 28.256 +0.728 1.532
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:-netcup01.therav 189.97.54.122 2 u 33 64 377 28.262 +2.916 0.247
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:-track.infra.9rc 141.144.246.224 5 u 20 64 377 28.682 +2.371 0.237
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:#sv1.ggsrv.de 192.53.103.103 2 u 23 64 377 24.975 +4.993 0.226
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:-stratum2-2.NTP. 129.70.137.82 2 u 12 64 377 30.162 +3.972 0.356
2026-04-16T19:41:57.164 INFO:teuthology.orchestra.run.vm04.stdout:+185.125.190.58 99.220.8.133 2 u 2 64 377 34.558 +3.266 0.187
2026-04-16T19:41:57.164 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-04-16T19:41:57.167 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-04-16T19:41:57.167 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-04-16T19:41:57.170 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-04-16T19:41:57.172 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-04-16T19:41:57.174 INFO:teuthology.task.internal:Duration was 1757.090682 seconds
2026-04-16T19:41:57.174 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-04-16T19:41:57.177 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-04-16T19:41:57.177 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-04-16T19:41:57.178 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-04-16T19:41:57.205 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-04-16T19:41:57.205 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local
2026-04-16T19:41:57.206 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-04-16T19:41:57.262 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local
2026-04-16T19:41:57.262 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-04-16T19:41:57.271 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-04-16T19:41:57.271 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-16T19:41:57.305 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-16T19:41:57.391 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-04-16T19:41:57.391 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-04-16T19:41:57.392 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-04-16T19:41:57.399 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-16T19:41:57.399 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-16T19:41:57.399 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-04-16T19:41:57.399 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-16T19:41:57.400 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-04-16T19:41:57.400 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-16T19:41:57.401 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-16T19:41:57.401 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-04-16T19:41:57.401 INFO:teuthology.orchestra.run.vm04.stderr: -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-16T19:41:57.401 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-04-16T19:41:57.414 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 92.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-04-16T19:41:57.418 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 90.9% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-04-16T19:41:57.419 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-04-16T19:41:57.422 INFO:teuthology.task.internal:Restoring /etc/sudoers...
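The kern.log check above is a filter chain: flag any line carrying a BUG/INFO/DEADLOCK marker, strip a long whitelist of known-benign patterns, and keep only the first survivor. An empty result means the check passes; a surviving line would be reported as a log error. Reduced to its shape (a sketch; the full whitelist is in the commands above):

  # scan the kernel log for suspicious markers, minus known noise
  grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' kern.log \
    | grep -v CRON \
    | grep -v 'lockdep is turned off' \
    | head -n 1   # non-empty output flags the job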
2026-04-16T19:41:57.419 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-04-16T19:41:57.422 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-04-16T19:41:57.422 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-16T19:41:57.468 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-16T19:41:57.477 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-04-16T19:41:57.480 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-16T19:41:57.513 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-16T19:41:57.520 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core
2026-04-16T19:41:57.530 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core
2026-04-16T19:41:57.538 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-16T19:41:57.571 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-16T19:41:57.571 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-16T19:41:57.581 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-16T19:41:57.581 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-04-16T19:41:57.585 INFO:teuthology.task.internal:Transferring archived files...
2026-04-16T19:41:57.585 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549/remote/vm01
2026-04-16T19:41:57.585 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-16T19:41:57.620 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-16_15:21:55-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5549/remote/vm04
2026-04-16T19:41:57.620 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-16T19:41:57.632 INFO:teuthology.task.internal:Removing archive directory...
2026-04-16T19:41:57.632 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-16T19:41:57.665 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-16T19:41:57.678 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-04-16T19:41:57.681 INFO:teuthology.task.internal:Not uploading archives.
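
Two details worth noting in this stretch. First, the exit status 1 from test -e .../coredump is the success path: the preceding rmdir --ignore-fail-on-non-empty removed the coredump directory because it was empty, so its absence means no daemon dumped core during the run. Second, the archive pull is a single tar stream over SSH rather than a file-by-file copy. A rough Python equivalent of that streamed pull, illustrative only, with the host and paths taken from this job:

    import subprocess, tarfile

    def pull_archive(host, remote_dir, dest):
        # Equivalent of: ssh HOST sudo tar c -f - -C REMOTE_DIR -- . | tar x -C DEST
        proc = subprocess.Popen(
            ['ssh', host, 'sudo', 'tar', 'c', '-f', '-', '-C', remote_dir, '--', '.'],
            stdout=subprocess.PIPE)
        with tarfile.open(fileobj=proc.stdout, mode='r|') as tar:
            tar.extractall(dest)  # 'r|' reads the non-seekable pipe as a stream
        if proc.wait() != 0:
            raise RuntimeError(f'remote tar on {host} exited {proc.returncode}')

    pull_archive('vm01.local', '/home/ubuntu/cephtest/archive', 'remote/vm01')
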
2026-04-16T19:41:57.681 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-04-16T19:41:57.684 INFO:teuthology.task.internal:Tidying up after the test...
2026-04-16T19:41:57.684 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-16T19:41:57.709 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-16T19:41:57.711 INFO:teuthology.orchestra.run.vm01.stdout:   258076    4 drwxr-xr-x   2 ubuntu   ubuntu   4096 Apr 16 19:41 /home/ubuntu/cephtest
2026-04-16T19:41:57.726 INFO:teuthology.orchestra.run.vm04.stdout:   258068    4 drwxr-xr-x   2 ubuntu   ubuntu   4096 Apr 16 19:41 /home/ubuntu/cephtest
2026-04-16T19:41:57.727 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-04-16T19:41:57.733 INFO:teuthology.run:Summary data:
description: orch:cephadm:smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
duration: 1757.0906822681427
owner: supriti
success: true
2026-04-16T19:41:57.733 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-16T19:41:57.750 INFO:teuthology.run:pass
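
The run ends cleanly: both per-host cephtest directories are empty and removed, the summary reports success after 1757.09 seconds (about 29.3 minutes), and the job result is pushed back to the report endpoint before the final pass. The summary block is plain YAML, so a wrapper script can fold it into an exit status directly; a sketch, assuming PyYAML is installed:

    import sys, yaml  # PyYAML assumed available

    SUMMARY = """
    description: orch:cephadm:smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
    duration: 1757.0906822681427
    owner: supriti
    success: true
    """

    s = yaml.safe_load(SUMMARY)
    print(f"{'pass' if s['success'] else 'fail'} in {s['duration'] / 60:.1f} min")
    sys.exit(0 if s['success'] else 1)
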